Diffstat (limited to 'import-layers/yocto-poky/scripts')
-rwxr-xr-x  import-layers/yocto-poky/scripts/bitbake-whatchanged | 41
-rwxr-xr-x  import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs | 4
-rwxr-xr-x  import-layers/yocto-poky/scripts/buildhistory-diff | 4
-rwxr-xr-x  import-layers/yocto-poky/scripts/buildstats-diff | 564
-rwxr-xr-x  import-layers/yocto-poky/scripts/cleanup-workdir | 26
-rwxr-xr-x  import-layers/yocto-poky/scripts/combo-layer | 540
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/bbvars.py | 62
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh | 153
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/ddimage | 6
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/devtool-stress.py | 68
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/graph-tool | 15
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py | 55
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/mkefidisk.sh | 4
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py | 6
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py | 44
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/uncovered | 39
-rwxr-xr-x  import-layers/yocto-poky/scripts/contrib/verify-homepage.py | 24
-rwxr-xr-x  import-layers/yocto-poky/scripts/cp-noerror | 10
-rwxr-xr-x  import-layers/yocto-poky/scripts/create-pull-request | 87
-rwxr-xr-x  import-layers/yocto-poky/scripts/devtool | 37
-rwxr-xr-x  import-layers/yocto-poky/scripts/gen-lockedsig-cache | 25
-rw-r--r--  import-layers/yocto-poky/scripts/lib/argparse_oe.py | 64
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/engine.py | 110
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/help.py | 18
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/kernel.py | 160
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend | 33
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend | 32
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf | 12
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend | 33
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend | 32
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend | 33
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend | 32
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend | 33
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend | 32
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend | 33
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend | 32
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc | 10
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend | 62
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend | 61
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend | 33
-rw-r--r--  import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend | 32
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/__init__.py | 51
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/build.py | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/build_image.py | 136
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/deploy.py | 143
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/runqemu.py | 15
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/sdk.py | 188
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/standard.py | 752
-rw-r--r--  import-layers/yocto-poky/scripts/lib/devtool/upgrade.py | 127
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/append.py | 15
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/create.py | 422
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys.py | 86
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py | 100
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/create_kmod.py | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py | 125
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/newappend.py | 15
-rw-r--r--  import-layers/yocto-poky/scripts/lib/recipetool/setvar.py | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/scriptutils.py | 15
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk.wks | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkefidisk.wks | 4
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks | 11
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/conf.py | 2
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/creator.py | 5
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/engine.py | 31
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/filemap.py | 561
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/help.py | 15
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py | 5
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/imager/direct.py | 67
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/ksparser.py | 31
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/msger.py | 78
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/partition.py | 34
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/plugin.py | 12
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/pluginbase.py | 42
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py | 3
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py | 19
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py | 6
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py | 61
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py | 9
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/utils/fs_related.py | 84
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py | 22
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py | 52
-rw-r--r--  import-layers/yocto-poky/scripts/lib/wic/utils/runner.py | 6
-rwxr-xr-x  import-layers/yocto-poky/scripts/lnr | 4
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-build-perf-test | 211
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-buildenv-internal | 42
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-check-sstate | 121
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-git-proxy | 13
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-gnome-terminal-phonehome | 10
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-pkgdata-util | 72
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-publish-sdk | 2
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-run-native | 48
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-selftest | 110
-rwxr-xr-x  import-layers/yocto-poky/scripts/oe-trim-schemas | 2
-rwxr-xr-x  import-layers/yocto-poky/scripts/oepydevshell-internal.py | 14
-rwxr-xr-x  import-layers/yocto-poky/scripts/opkg-query-helper.py | 2
-rw-r--r--  import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/main.py.in | 6
-rw-r--r--  import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py | 3
-rwxr-xr-x  import-layers/yocto-poky/scripts/pythondeps | 4
-rwxr-xr-x  import-layers/yocto-poky/scripts/recipetool | 66
-rwxr-xr-x  import-layers/yocto-poky/scripts/relocate_sdk.py | 2
-rwxr-xr-x  import-layers/yocto-poky/scripts/runqemu | 1519
-rwxr-xr-x  import-layers/yocto-poky/scripts/runqemu-gen-tapdevs | 46
-rwxr-xr-x  import-layers/yocto-poky/scripts/runqemu-internal | 717
-rwxr-xr-x  import-layers/yocto-poky/scripts/send-error-report | 30
-rwxr-xr-x  import-layers/yocto-poky/scripts/swabber-strace-attach | 31
-rwxr-xr-x  import-layers/yocto-poky/scripts/sysroot-relativelinks.py | 2
-rwxr-xr-x  import-layers/yocto-poky/scripts/test-dependencies.sh | 4
-rwxr-xr-x  import-layers/yocto-poky/scripts/test-remote-image | 12
-rwxr-xr-x  import-layers/yocto-poky/scripts/tiny/dirsize.py | 22
-rwxr-xr-x  import-layers/yocto-poky/scripts/tiny/ksize.py | 44
-rwxr-xr-x  import-layers/yocto-poky/scripts/verify-bashisms | 116
-rwxr-xr-x  import-layers/yocto-poky/scripts/wic | 50
-rwxr-xr-x  import-layers/yocto-poky/scripts/wipe-sysroot | 2
-rwxr-xr-x  import-layers/yocto-poky/scripts/yocto-bsp | 151
-rwxr-xr-x  import-layers/yocto-poky/scripts/yocto-kernel | 4
-rwxr-xr-x  import-layers/yocto-poky/scripts/yocto-layer | 4
134 files changed, 6365 insertions(+), 3234 deletions(-)
diff --git a/import-layers/yocto-poky/scripts/bitbake-whatchanged b/import-layers/yocto-poky/scripts/bitbake-whatchanged
index af54d16f8..0207777e6 100755
--- a/import-layers/yocto-poky/scripts/bitbake-whatchanged
+++ b/import-layers/yocto-poky/scripts/bitbake-whatchanged
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
@@ -17,7 +17,6 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-from __future__ import print_function
import os
import sys
import getopt
@@ -25,7 +24,7 @@ import shutil
import re
import warnings
import subprocess
-from optparse import OptionParser
+import argparse
scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
lib_path = scripts_path + '/lib'
@@ -38,6 +37,8 @@ bitbakepath = scriptpath.add_bitbake_lib_path()
if not bitbakepath:
sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
sys.exit(1)
+scriptpath.add_oe_lib_path()
+import argparse_oe
import bb.siggen
import bb.process
@@ -118,7 +119,7 @@ def print_added(d_new = None, d_old = None):
Print the newly added tasks
"""
added = {}
- for k in d_new.keys():
+ for k in list(d_new.keys()):
if k not in d_old:
# Add the new one to added dict, and remove it from
# d_new, so the remaining ones are the changed ones
@@ -153,7 +154,7 @@ def print_vrchanged(d_new = None, d_old = None, vr = None):
"""
pvchanged = {}
counter = 0
- for k in d_new.keys():
+ for k in list(d_new.keys()):
if d_new.get(k).get(vr) != d_old.get(k).get(vr):
counter += 1
pn, task = split_pntask(k)
@@ -219,9 +220,7 @@ def main():
3) Use bb.siggen.compare_sigfiles to diff the old and new stamps
"""
- parser = OptionParser(
- version = "1.0",
- usage = """%prog [options] [package ...]
+ parser = argparse_oe.ArgumentParser(usage = """%(prog)s [options] [package ...]
print what will be done between the current and last builds, for example:
$ bitbake core-image-sato
@@ -236,17 +235,9 @@ Note:
The "nostamp" task is not included.
"""
)
- parser.add_option("-v", "--verbose", help = "print the verbose changes",
- action = "store_true", dest = "verbose")
-
- options, args = parser.parse_args(sys.argv)
-
- verbose = options.verbose
-
- if len(args) != 2:
- parser.error("Incorrect number of arguments")
- else:
- recipe = args[1]
+ parser.add_argument("recipe", help="recipe to check")
+ parser.add_argument("-v", "--verbose", help = "print the verbose changes", action = "store_true")
+ args = parser.parse_args()
# Get the STAMPS_DIR
print("Figuring out the STAMPS_DIR ...")
@@ -256,7 +247,7 @@ Note:
except:
raise
if not stampsdir:
- print("ERROR: No STAMPS_DIR found for '%s'" % recipe, file=sys.stderr)
+ print("ERROR: No STAMPS_DIR found for '%s'" % args.recipe, file=sys.stderr)
return 2
stampsdir = stampsdir.rstrip("\n")
if not os.path.isdir(stampsdir):
@@ -272,7 +263,7 @@ Note:
try:
# Generate the new stamps dir
print("Generating the new stamps ... (need several minutes)")
- cmdline = "STAMPS_DIR=%s bitbake -S none %s" % (new_stampsdir, recipe)
+ cmdline = "STAMPS_DIR=%s bitbake -S none %s" % (new_stampsdir, args.recipe)
# FIXME
        # "bitbake -S" may fail; this is not a fatal error and the stamps
        # will still be generated, but it might be a bug in "bitbake -S".
@@ -287,7 +278,7 @@ Note:
# Remove the same one from both stamps.
cnt_unchanged = 0
- for k in new_dict.keys():
+ for k in list(new_dict.keys()):
if k in old_dict:
cnt_unchanged += 1
del(new_dict[k])
@@ -310,17 +301,17 @@ Note:
# PV (including PE) and PR changed
# Let the bb.siggen handle them if verbose
cnt_rv = {}
- if not verbose:
+ if not args.verbose:
for i in ('pv', 'pr'):
cnt_rv[i] = print_vrchanged(new_recon, old_recon, i)
# Dependencies changed (use bitbake-diffsigs)
- cnt_dep = print_depchanged(new_recon, old_recon, verbose)
+ cnt_dep = print_depchanged(new_recon, old_recon, args.verbose)
total_changed = cnt_added + (cnt_rv.get('pv') or 0) + (cnt_rv.get('pr') or 0) + cnt_dep
print("\n=== Summary: (%s changed, %s unchanged)" % (total_changed, cnt_unchanged))
- if verbose:
+ if args.verbose:
print("Newly added: %s\nDependencies changed: %s\n" % \
(cnt_added, cnt_dep))
else:
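
The hunks above replace optparse with the OE argparse wrapper and make the recipe a proper positional argument. A minimal standalone sketch of the same migration pattern (plain argparse is used here; argparse_oe is the OE-specific subclass added to the import path above):

    import argparse

    parser = argparse.ArgumentParser(usage="%(prog)s [options] recipe")
    # argparse validates positional arguments itself, so the manual
    # len(args) check that optparse required goes away.
    parser.add_argument("recipe", help="recipe to check")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="print the verbose changes")
    args = parser.parse_args()
    print(args.recipe, args.verbose)
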
diff --git a/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs b/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs
index f3eb76bd0..8a03580f8 100755
--- a/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs
+++ b/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Collects the recorded SRCREV values from buildhistory and reports on them
#
@@ -103,7 +103,7 @@ def main():
if options.reportall or value != orig:
all_srcrevs[curdir].append((pn, name, srcrev))
- for curdir, srcrevs in sorted(all_srcrevs.iteritems()):
+ for curdir, srcrevs in sorted(all_srcrevs.items()):
if srcrevs:
print('# %s' % curdir)
for pn, name, srcrev in srcrevs:
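
The same two Python 3 dictionary-iteration fixes recur throughout these scripts: dict.iteritems() is gone (use .items()), and a dict must not be mutated while iterating over its live keys view, hence the list(...) copies seen above. An illustrative sketch:

    d = {'a': 1, 'b': 2, 'c': 3}

    for k, v in sorted(d.items()):    # Python 3: .items() replaces .iteritems()
        print(k, v)

    for k in list(d.keys()):          # copy the keys before deleting entries
        if d[k] > 1:
            del d[k]
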
diff --git a/import-layers/yocto-poky/scripts/buildhistory-diff b/import-layers/yocto-poky/scripts/buildhistory-diff
index dfebcddf7..d8ca12d3e 100755
--- a/import-layers/yocto-poky/scripts/buildhistory-diff
+++ b/import-layers/yocto-poky/scripts/buildhistory-diff
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Report significant differences in the buildhistory repository since a specific revision
#
@@ -14,7 +14,7 @@ from distutils.version import LooseVersion
try:
import git
except ImportError:
- print("Please install GitPython (python-git) 0.3.1 or later in order to use this script")
+ print("Please install GitPython (python3-git) 0.3.4 or later in order to use this script")
sys.exit(1)
def main():
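
Apart from the interpreter line, the change here only updates the installation hint in the guarded-import block. For reference, the pattern is (a sketch, not the full script):

    import sys

    try:
        import git  # GitPython
    except ImportError:
        print("Please install GitPython (python3-git) 0.3.4 or later "
              "in order to use this script")
        sys.exit(1)
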
diff --git a/import-layers/yocto-poky/scripts/buildstats-diff b/import-layers/yocto-poky/scripts/buildstats-diff
new file mode 100755
index 000000000..f918a6d5e
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/buildstats-diff
@@ -0,0 +1,564 @@
+#!/usr/bin/python3
+#
+# Script for comparing buildstats from two different builds
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import argparse
+import glob
+import json
+import logging
+import math
+import os
+import re
+import sys
+from collections import namedtuple
+from datetime import datetime, timedelta, tzinfo
+from operator import attrgetter
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger()
+
+
+class ScriptError(Exception):
+ """Exception for internal error handling of this script"""
+ pass
+
+
+class TimeZone(tzinfo):
+ """Simple fixed-offset tzinfo"""
+ def __init__(self, seconds, name):
+ self._offset = timedelta(seconds=seconds)
+ self._name = name
+
+ def utcoffset(self, dt):
+ return self._offset
+
+ def tzname(self, dt):
+ return self._name
+
+ def dst(self, dt):
+ return None
+
+TIMEZONES = {'UTC': TimeZone(0, 'UTC'),
+ 'EET': TimeZone(7200, 'EET'),
+ 'EEST': TimeZone(10800, 'EEST')}
+
+taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'value1', 'value2',
+ 'absdiff', 'reldiff')
+TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
+
+
+def to_datetime_obj(obj):
+ """Helper for getting timestamps in datetime format"""
+ if isinstance(obj, datetime):
+ return obj
+ else:
+ return datetime.utcfromtimestamp(obj).replace(tzinfo=TIMEZONES['UTC'])
+
+
+class BSTask(dict):
+ def __init__(self, *args, **kwargs):
+ self['start_time'] = None
+ self['elapsed_time'] = None
+ self['status'] = None
+ self['iostat'] = {}
+ self['rusage'] = {}
+ self['child_rusage'] = {}
+ super(BSTask, self).__init__(*args, **kwargs)
+
+ @property
+ def cputime(self):
+ """Sum of user and system time taken by the task"""
+ return self['rusage']['ru_stime'] + self['rusage']['ru_utime'] + \
+ self['child_rusage']['ru_stime'] + self['child_rusage']['ru_utime']
+
+ @property
+ def walltime(self):
+ """Elapsed wall clock time"""
+ return self['elapsed_time'].total_seconds()
+
+ @property
+ def read_bytes(self):
+ """Bytes read from the block layer"""
+ return self['iostat']['read_bytes']
+
+ @property
+ def write_bytes(self):
+ """Bytes written to the block layer"""
+ return self['iostat']['write_bytes']
+
+ @property
+ def read_ops(self):
+ """Number of read operations on the block layer"""
+ return self['rusage']['ru_inblock'] + self['child_rusage']['ru_inblock']
+
+ @property
+ def write_ops(self):
+ """Number of write operations on the block layer"""
+ return self['rusage']['ru_oublock'] + self['child_rusage']['ru_oublock']
+
+
+def read_buildstats_file(buildstat_file):
+ """Convert buildstat text file into dict/json"""
+ bs_task = BSTask()
+ log.debug("Reading task buildstats from %s", buildstat_file)
+ with open(buildstat_file) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Started':
+ start_time = to_datetime_obj(float(val))
+ bs_task['start_time'] = start_time
+ elif key == 'Ended':
+ end_time = to_datetime_obj(float(val))
+ elif key.startswith('IO '):
+ split = key.split()
+ bs_task['iostat'][split[1]] = int(val)
+ elif key.find('rusage') >= 0:
+ split = key.split()
+ ru_key = split[-1]
+ if ru_key in ('ru_stime', 'ru_utime'):
+ val = float(val)
+ else:
+ val = int(val)
+ ru_type = 'rusage' if split[0] == 'rusage' else \
+ 'child_rusage'
+ bs_task[ru_type][ru_key] = val
+ elif key == 'Status':
+ bs_task['status'] = val
+ bs_task['elapsed_time'] = end_time - start_time
+ return bs_task
+
+
+def read_buildstats_dir(bs_dir):
+ """Read buildstats directory"""
+ def split_nevr(nevr):
+ """Split name and version information from recipe "nevr" string"""
+ n_e_v, revision = nevr.rsplit('-', 1)
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
+ n_e_v)
+ if not match:
+ # If we're not able to parse a version starting with a number, just
+ # take the part after last dash
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
+ n_e_v)
+ name = match.group('name')
+ version = match.group('version')
+ epoch = match.group('epoch')
+ return name, epoch, version, revision
+
+ if not os.path.isfile(os.path.join(bs_dir, 'build_stats')):
+ raise ScriptError("{} does not look like a buildstats directory".format(bs_dir))
+
+ log.debug("Reading buildstats directory %s", bs_dir)
+
+ buildstats = {}
+ subdirs = os.listdir(bs_dir)
+ for dirname in subdirs:
+ recipe_dir = os.path.join(bs_dir, dirname)
+ if not os.path.isdir(recipe_dir):
+ continue
+ name, epoch, version, revision = split_nevr(dirname)
+ recipe_bs = {'nevr': dirname,
+ 'name': name,
+ 'epoch': epoch,
+ 'version': version,
+ 'revision': revision,
+ 'tasks': {}}
+ for task in os.listdir(recipe_dir):
+ recipe_bs['tasks'][task] = [read_buildstats_file(
+ os.path.join(recipe_dir, task))]
+ if name in buildstats:
+ raise ScriptError("Cannot handle multiple versions of the same "
+ "package ({})".format(name))
+ buildstats[name] = recipe_bs
+
+ return buildstats
+
+
+def bs_append(dst, src):
+ """Append data from another buildstats"""
+ if set(dst.keys()) != set(src.keys()):
+ raise ScriptError("Refusing to join buildstats, set of packages is "
+ "different")
+ for pkg, data in dst.items():
+ if data['nevr'] != src[pkg]['nevr']:
+ raise ScriptError("Refusing to join buildstats, package version "
+ "differs: {} vs. {}".format(data['nevr'], src[pkg]['nevr']))
+ if set(data['tasks'].keys()) != set(src[pkg]['tasks'].keys()):
+ raise ScriptError("Refusing to join buildstats, set of tasks "
+ "in {} differ".format(pkg))
+ for taskname, taskdata in data['tasks'].items():
+ taskdata.extend(src[pkg]['tasks'][taskname])
+
+
+def read_buildstats_json(path):
+ """Read buildstats from JSON file"""
+ buildstats = {}
+ with open(path) as fobj:
+ bs_json = json.load(fobj)
+ for recipe_bs in bs_json:
+ if recipe_bs['name'] in buildstats:
+ raise ScriptError("Cannot handle multiple versions of the same "
+ "package ({})".format(recipe_bs['name']))
+
+ if recipe_bs['epoch'] is None:
+ recipe_bs['nevr'] = "{}-{}-{}".format(recipe_bs['name'], recipe_bs['version'], recipe_bs['revision'])
+ else:
+ recipe_bs['nevr'] = "{}-{}_{}-{}".format(recipe_bs['name'], recipe_bs['epoch'], recipe_bs['version'], recipe_bs['revision'])
+
+ for task, data in recipe_bs['tasks'].copy().items():
+ recipe_bs['tasks'][task] = [BSTask(data)]
+
+ buildstats[recipe_bs['name']] = recipe_bs
+
+ return buildstats
+
+
+def read_buildstats(path, multi):
+ """Read buildstats"""
+ if not os.path.exists(path):
+ raise ScriptError("No such file or directory: {}".format(path))
+
+ if os.path.isfile(path):
+ return read_buildstats_json(path)
+
+ if os.path.isfile(os.path.join(path, 'build_stats')):
+ return read_buildstats_dir(path)
+
+    # Handle a directory that holds one or more buildstats
+ subpaths = sorted(glob.glob(path + '/*'))
+ if len(subpaths) > 1:
+ if multi:
+ log.info("Averaging over {} buildstats from {}".format(
+ len(subpaths), path))
+ else:
+ raise ScriptError("Multiple buildstats found in '{}'. Please give "
+                              "a single buildstat directory or use the --multi "
+ "option".format(path))
+ bs = None
+ for subpath in subpaths:
+ if os.path.isfile(subpath):
+ tmpbs = read_buildstats_json(subpath)
+ else:
+ tmpbs = read_buildstats_dir(subpath)
+ if not bs:
+ bs = tmpbs
+ else:
+ log.debug("Joining buildstats")
+ bs_append(bs, tmpbs)
+
+ if not bs:
+ raise ScriptError("No buildstats found under {}".format(path))
+ return bs
+
+
+def print_ver_diff(bs1, bs2):
+ """Print package version differences"""
+ pkgs1 = set(bs1.keys())
+ pkgs2 = set(bs2.keys())
+ new_pkgs = pkgs2 - pkgs1
+ deleted_pkgs = pkgs1 - pkgs2
+
+ echanged = []
+ vchanged = []
+ rchanged = []
+ unchanged = []
+ common_pkgs = pkgs2.intersection(pkgs1)
+ if common_pkgs:
+ for pkg in common_pkgs:
+ if bs1[pkg]['epoch'] != bs2[pkg]['epoch']:
+ echanged.append(pkg)
+ elif bs1[pkg]['version'] != bs2[pkg]['version']:
+ vchanged.append(pkg)
+ elif bs1[pkg]['revision'] != bs2[pkg]['revision']:
+ rchanged.append(pkg)
+ else:
+ unchanged.append(pkg)
+
+ maxlen = max([len(pkg) for pkg in pkgs1.union(pkgs2)])
+ fmt_str = " {:{maxlen}} ({})"
+# if unchanged:
+# print("\nUNCHANGED PACKAGES:")
+# print("-------------------")
+# maxlen = max([len(pkg) for pkg in unchanged])
+# for pkg in sorted(unchanged):
+# print(fmt_str.format(pkg, bs2[pkg]['nevr'], maxlen=maxlen))
+
+ if new_pkgs:
+ print("\nNEW PACKAGES:")
+ print("-------------")
+ for pkg in sorted(new_pkgs):
+ print(fmt_str.format(pkg, bs2[pkg]['nevr'], maxlen=maxlen))
+
+ if deleted_pkgs:
+ print("\nDELETED PACKAGES:")
+ print("-----------------")
+ for pkg in sorted(deleted_pkgs):
+ print(fmt_str.format(pkg, bs1[pkg]['nevr'], maxlen=maxlen))
+
+ fmt_str = " {0:{maxlen}} {1:<20} ({2})"
+ if rchanged:
+ print("\nREVISION CHANGED:")
+ print("-----------------")
+ for pkg in sorted(rchanged):
+            field1 = "{} -> {}".format(bs1[pkg]['revision'], bs2[pkg]['revision'])
+ field2 = "{} -> {}".format(bs1[pkg]['nevr'], bs2[pkg]['nevr'])
+ print(fmt_str.format(pkg, field1, field2, maxlen=maxlen))
+
+ if vchanged:
+ print("\nVERSION CHANGED:")
+ print("----------------")
+ for pkg in sorted(vchanged):
+ field1 = "{} -> {}".format(bs1[pkg]['version'], bs2[pkg]['version'])
+ field2 = "{} -> {}".format(bs1[pkg]['nevr'], bs2[pkg]['nevr'])
+ print(fmt_str.format(pkg, field1, field2, maxlen=maxlen))
+
+ if echanged:
+ print("\nEPOCH CHANGED:")
+ print("--------------")
+ for pkg in sorted(echanged):
+            field1 = "{} -> {}".format(bs1[pkg]['epoch'], bs2[pkg]['epoch'])
+ field2 = "{} -> {}".format(bs1[pkg]['nevr'], bs2[pkg]['nevr'])
+ print(fmt_str.format(pkg, field1, field2, maxlen=maxlen))
+
+
+def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',)):
+ """Diff task execution times"""
+ def val_to_str(val, human_readable=False):
+ """Convert raw value to printable string"""
+ def hms_time(secs):
+ """Get time in human-readable HH:MM:SS format"""
+ h = int(secs / 3600)
+ m = int((secs % 3600) / 60)
+ s = secs % 60
+ if h == 0:
+ return "{:02d}:{:04.1f}".format(m, s)
+ else:
+ return "{:d}:{:02d}:{:04.1f}".format(h, m, s)
+
+ if 'time' in val_type:
+ if human_readable:
+ return hms_time(val)
+ else:
+ return "{:.1f}s".format(val)
+ elif 'bytes' in val_type and human_readable:
+ prefix = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi']
+ dec = int(math.log(val, 2) / 10)
+ prec = 1 if dec > 0 else 0
+ return "{:.{prec}f}{}B".format(val / (2 ** (10 * dec)),
+ prefix[dec], prec=prec)
+ elif 'ops' in val_type and human_readable:
+ prefix = ['', 'k', 'M', 'G', 'T', 'P']
+ dec = int(math.log(val, 1000))
+ prec = 1 if dec > 0 else 0
+ return "{:.{prec}f}{}ops".format(val / (1000 ** dec),
+ prefix[dec], prec=prec)
+ return str(int(val))
+
+ def sum_vals(buildstats):
+ """Get cumulative sum of all tasks"""
+ total = 0.0
+ for recipe_data in buildstats.values():
+ for bs_task in recipe_data['tasks'].values():
+ total += sum([getattr(b, val_type) for b in bs_task]) / len(bs_task)
+ return total
+
+ tasks_diff = []
+
+ if min_val:
+ print("Ignoring tasks less than {} ({})".format(
+ val_to_str(min_val, True), val_to_str(min_val)))
+ if min_absdiff:
+ print("Ignoring differences less than {} ({})".format(
+ val_to_str(min_absdiff, True), val_to_str(min_absdiff)))
+
+ # Prepare the data
+ pkgs = set(bs1.keys()).union(set(bs2.keys()))
+ for pkg in pkgs:
+ tasks1 = bs1[pkg]['tasks'] if pkg in bs1 else {}
+ tasks2 = bs2[pkg]['tasks'] if pkg in bs2 else {}
+ if not tasks1:
+ pkg_op = '+ '
+ elif not tasks2:
+ pkg_op = '- '
+ else:
+ pkg_op = ' '
+
+ for task in set(tasks1.keys()).union(set(tasks2.keys())):
+ task_op = ' '
+ if task in tasks1:
+ # Average over all values
+ val1 = [getattr(b, val_type) for b in bs1[pkg]['tasks'][task]]
+ val1 = sum(val1) / len(val1)
+ else:
+ task_op = '+ '
+ val1 = 0
+ if task in tasks2:
+ # Average over all values
+ val2 = [getattr(b, val_type) for b in bs2[pkg]['tasks'][task]]
+ val2 = sum(val2) / len(val2)
+ else:
+ val2 = 0
+ task_op = '- '
+
+ if val1 == 0:
+ reldiff = float('inf')
+ else:
+ reldiff = 100 * (val2 - val1) / val1
+
+ if max(val1, val2) < min_val:
+ log.debug("Filtering out %s:%s (%s)", pkg, task,
+ val_to_str(max(val1, val2)))
+ continue
+ if abs(val2 - val1) < min_absdiff:
+ log.debug("Filtering out %s:%s (difference of %s)", pkg, task,
+ val_to_str(val2-val1))
+ continue
+ tasks_diff.append(TaskDiff(pkg, pkg_op, task, task_op, val1, val2,
+ val2-val1, reldiff))
+
+ # Sort our list
+ for field in reversed(sort_by):
+ if field.startswith('-'):
+ field = field[1:]
+ reverse = True
+ else:
+ reverse = False
+ tasks_diff = sorted(tasks_diff, key=attrgetter(field), reverse=reverse)
+
+ linedata = [(' ', 'PKG', ' ', 'TASK', 'ABSDIFF', 'RELDIFF',
+ val_type.upper() + '1', val_type.upper() + '2')]
+ field_lens = dict([('len_{}'.format(i), len(f)) for i, f in enumerate(linedata[0])])
+
+ # Prepare fields in string format and measure field lengths
+ for diff in tasks_diff:
+ task_prefix = diff.task_op if diff.pkg_op == ' ' else ' '
+ linedata.append((diff.pkg_op, diff.pkg, task_prefix, diff.task,
+ val_to_str(diff.absdiff),
+ '{:+.1f}%'.format(diff.reldiff),
+ val_to_str(diff.value1),
+ val_to_str(diff.value2)))
+ for i, field in enumerate(linedata[-1]):
+ key = 'len_{}'.format(i)
+ if len(field) > field_lens[key]:
+ field_lens[key] = len(field)
+
+ # Print data
+ print()
+ for fields in linedata:
+ print("{:{len_0}}{:{len_1}} {:{len_2}}{:{len_3}} {:>{len_4}} {:>{len_5}} {:>{len_6}} -> {:{len_7}}".format(
+ *fields, **field_lens))
+
+ # Print summary of the diffs
+ total1 = sum_vals(bs1)
+ total2 = sum_vals(bs2)
+ print("\nCumulative {}:".format(val_type))
+    print(" {} {:+.1f}% {} ({}) -> {} ({})".format(
+ val_to_str(total2 - total1), 100 * (total2-total1) / total1,
+ val_to_str(total1, True), val_to_str(total1),
+ val_to_str(total2, True), val_to_str(total2)))
+
+
+def parse_args(argv):
+ """Parse cmdline arguments"""
+ description="""
+Script for comparing buildstats of two separate builds."""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description=description)
+
+ min_val_defaults = {'cputime': 3.0,
+ 'read_bytes': 524288,
+ 'write_bytes': 524288,
+ 'read_ops': 500,
+ 'write_ops': 500,
+ 'walltime': 5}
+ min_absdiff_defaults = {'cputime': 1.0,
+ 'read_bytes': 131072,
+ 'write_bytes': 131072,
+ 'read_ops': 50,
+ 'write_ops': 50,
+ 'walltime': 2}
+
+ parser.add_argument('--debug', '-d', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--ver-diff', action='store_true',
+ help="Show package version differences and exit")
+ parser.add_argument('--diff-attr', default='cputime',
+ choices=min_val_defaults.keys(),
+                        help="Buildstat attribute to compare")
+ parser.add_argument('--min-val', default=min_val_defaults, type=float,
+ help="Filter out tasks less than MIN_VAL. "
+ "Default depends on --diff-attr.")
+ parser.add_argument('--min-absdiff', default=min_absdiff_defaults, type=float,
+ help="Filter out tasks whose difference is less than "
+                             "MIN_ABSDIFF. Default depends on --diff-attr.")
+ parser.add_argument('--sort-by', default='absdiff',
+ help="Comma-separated list of field sort order. "
+ "Prepend the field name with '-' for reversed sort. "
+ "Available fields are: {}".format(', '.join(taskdiff_fields)))
+ parser.add_argument('--multi', action='store_true',
+ help="Read all buildstats from the given paths and "
+ "average over them")
+ parser.add_argument('buildstats1', metavar='BUILDSTATS1', help="'Left' buildstat")
+ parser.add_argument('buildstats2', metavar='BUILDSTATS2', help="'Right' buildstat")
+
+ args = parser.parse_args(argv)
+
+    # We do not need/want to read all buildstats if we just want to look at the
+ # package versions
+ if args.ver_diff:
+ args.multi = False
+
+ # Handle defaults for the filter arguments
+ if args.min_val is min_val_defaults:
+ args.min_val = min_val_defaults[args.diff_attr]
+ if args.min_absdiff is min_absdiff_defaults:
+ args.min_absdiff = min_absdiff_defaults[args.diff_attr]
+
+ return args
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ # Validate sort fields
+ sort_by = []
+ for field in args.sort_by.split(','):
+ if field.lstrip('-') not in taskdiff_fields:
+ log.error("Invalid sort field '%s' (must be one of: %s)" %
+ (field, ', '.join(taskdiff_fields)))
+ sys.exit(1)
+ sort_by.append(field)
+
+ try:
+ bs1 = read_buildstats(args.buildstats1, args.multi)
+ bs2 = read_buildstats(args.buildstats2, args.multi)
+
+ if args.ver_diff:
+ print_ver_diff(bs1, bs2)
+ else:
+ print_task_diff(bs1, bs2, args.diff_attr, args.min_val,
+ args.min_absdiff, sort_by)
+ except ScriptError as err:
+ log.error(str(err))
+ return 1
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
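
One detail of the new script worth noting: parse_args() gives --min-val and --min-absdiff a default that depends on --diff-attr by using the defaults dict itself as the argparse default, so an identity check ("is") later reveals whether the user actually supplied a value. A self-contained sketch of that trick:

    import argparse

    defaults = {'cputime': 3.0, 'walltime': 5}
    parser = argparse.ArgumentParser()
    parser.add_argument('--diff-attr', default='cputime', choices=defaults.keys())
    parser.add_argument('--min-val', default=defaults, type=float)

    args = parser.parse_args(['--diff-attr', 'walltime'])
    if args.min_val is defaults:      # user did not pass --min-val
        args.min_val = defaults[args.diff_attr]
    print(args.min_val)               # -> 5
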
diff --git a/import-layers/yocto-poky/scripts/cleanup-workdir b/import-layers/yocto-poky/scripts/cleanup-workdir
index 01ebd526e..98769f6b3 100755
--- a/import-layers/yocto-poky/scripts/cleanup-workdir
+++ b/import-layers/yocto-poky/scripts/cleanup-workdir
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (c) 2012 Wind River Systems, Inc.
#
@@ -27,7 +27,7 @@ obsolete_dirs = []
parser = None
def err_quit(msg):
- print msg
+ print(msg)
parser.print_usage()
sys.exit(1)
@@ -43,19 +43,19 @@ def run_command(cmd):
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
output = pipe.communicate()[0]
if pipe.returncode != 0:
- print "Execute command '%s' failed." % cmd
+ print("Execute command '%s' failed." % cmd)
sys.exit(1)
- return output
+ return output.decode('utf-8')
def get_cur_arch_dirs(workdir, arch_dirs):
pattern = workdir + '/(.*?)/'
- cmd = "bitbake -e | grep ^SDK_ARCH="
+ cmd = "bitbake -e | grep ^SDK_SYS="
output = run_command(cmd)
- sdk_arch = output.split('"')[1]
+ sdk_sys = output.split('"')[1]
    # select these 5 packages to get the dirs of the current arch
- pkgs = ['hicolor-icon-theme', 'base-files', 'acl-native', 'binutils-crosssdk-' + sdk_arch, 'nativesdk-autoconf']
+ pkgs = ['hicolor-icon-theme', 'base-files', 'acl-native', 'binutils-crosssdk-' + sdk_sys, 'nativesdk-autoconf']
for pkg in pkgs:
cmd = "bitbake -e " + pkg + " | grep ^IMAGE_ROOTFS="
@@ -84,7 +84,7 @@ will be deleted. Be CAUTIOUS.""")
if os.getcwd() != builddir:
err_quit("Please run %s under: %s\n" % (os.path.basename(args[0]), builddir))
- print 'Updating bitbake caches...'
+ print('Updating bitbake caches...')
cmd = "bitbake -s"
output = run_command(cmd)
@@ -129,13 +129,13 @@ will be deleted. Be CAUTIOUS.""")
# won't fail just in case
if not tmpdir or not image_rootfs:
- print "Can't get TMPDIR or IMAGE_ROOTFS."
+ print("Can't get TMPDIR or IMAGE_ROOTFS.")
return 1
pattern = tmpdir + '/(.*?)/(.*?)/'
m = re.match(pattern, image_rootfs)
if not m:
- print "Can't get WORKDIR."
+ print("Can't get WORKDIR.")
return 1
workdir = os.path.join(tmpdir, m.group(1))
@@ -178,13 +178,13 @@ will be deleted. Be CAUTIOUS.""")
break
for d in obsolete_dirs:
- print "Deleting %s" % d
+ print("Deleting %s" % d)
shutil.rmtree(d, True)
if len(obsolete_dirs):
- print '\nTotal %d items.' % len(obsolete_dirs)
+ print('\nTotal %d items.' % len(obsolete_dirs))
else:
- print '\nNo obsolete directory found under %s.' % workdir
+ print('\nNo obsolete directory found under %s.' % workdir)
return 0
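
Beyond the print-function conversions, the key Python 3 fix in cleanup-workdir is decoding subprocess output: Popen.communicate() returns bytes, so run_command() now decodes before callers perform string operations such as split(). A minimal sketch of the pattern (the echo command is only for illustration):

    import subprocess

    def run_command(cmd):
        pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        output = pipe.communicate()[0]      # bytes under Python 3
        if pipe.returncode != 0:
            print("Execute command '%s' failed." % cmd)
            raise SystemExit(1)
        return output.decode('utf-8')       # hand str back to callers

    line = run_command('echo SDK_SYS="x86_64-pokysdk-linux"')
    print(line.split('"')[1])               # -> x86_64-pokysdk-linux
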
diff --git a/import-layers/yocto-poky/scripts/combo-layer b/import-layers/yocto-poky/scripts/combo-layer
index 91270415f..b90bfc880 100755
--- a/import-layers/yocto-poky/scripts/combo-layer
+++ b/import-layers/yocto-poky/scripts/combo-layer
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
@@ -26,10 +26,14 @@ import optparse
import logging
import subprocess
import tempfile
-import ConfigParser
+import configparser
import re
+import copy
+import pipes
+import shutil
from collections import OrderedDict
from string import Template
+from functools import reduce
__version__ = "0.2.1"
@@ -73,7 +77,7 @@ class Configuration(object):
else:
# Apply special type transformations for some properties.
# Type matches the RawConfigParser.get*() methods.
- types = {'signoff': 'boolean', 'update': 'boolean'}
+ types = {'signoff': 'boolean', 'update': 'boolean', 'history': 'boolean'}
if name in types:
value = getattr(parser, 'get' + types[name])(section, name)
self.repos[repo][name] = value
@@ -84,7 +88,7 @@ class Configuration(object):
self.commit_msg_template = value
logger.debug("Loading config file %s" % self.conffile)
- self.parser = ConfigParser.ConfigParser()
+ self.parser = configparser.ConfigParser()
with open(self.conffile) as f:
self.parser.readfp(f)
@@ -113,7 +117,7 @@ class Configuration(object):
self.localconffile = lcfile
logger.debug("Loading local config file %s" % self.localconffile)
- self.localparser = ConfigParser.ConfigParser()
+ self.localparser = configparser.ConfigParser()
with open(self.localconffile) as f:
self.localparser.readfp(f)
@@ -174,28 +178,28 @@ class Configuration(object):
logger.error("ERROR: patchutils package is missing, please install it (e.g. # apt-get install patchutils)")
sys.exit(1)
-def runcmd(cmd,destdir=None,printerr=True,out=None):
+def runcmd(cmd,destdir=None,printerr=True,out=None,env=None):
"""
execute command, raise CalledProcessError if fail
return output if succeed
"""
logger.debug("run cmd '%s' in %s" % (cmd, os.getcwd() if destdir is None else destdir))
if not out:
- out = os.tmpfile()
+ out = tempfile.TemporaryFile()
err = out
else:
- err = os.tmpfile()
+ err = tempfile.TemporaryFile()
try:
- subprocess.check_call(cmd, stdout=out, stderr=err, cwd=destdir, shell=isinstance(cmd, str))
- except subprocess.CalledProcessError,e:
+ subprocess.check_call(cmd, stdout=out, stderr=err, cwd=destdir, shell=isinstance(cmd, str), env=env or os.environ)
+ except subprocess.CalledProcessError as e:
err.seek(0)
if printerr:
logger.error("%s" % err.read())
raise e
err.seek(0)
- output = err.read()
- logger.debug("output: %s" % output )
+ output = err.read().decode('utf-8')
+ logger.debug("output: %s" % output.replace(chr(0), '\\0'))
return output
def action_init(conf, args):
@@ -426,7 +430,7 @@ file_exclude = %s''' % (name, file_filter or '<empty>', repo.get('file_exclude',
runcmd('git replace --graft %s %s' % (start, startrev))
try:
runcmd(merge)
- except Exception, error:
+ except Exception as error:
logger.info('''Merging component repository history failed, perhaps because of merge conflicts.
It may be possible to commit anyway after resolving these conflicts.
@@ -478,32 +482,32 @@ def check_repo_clean(repodir):
sys.exit(1)
def check_patch(patchfile):
- f = open(patchfile)
+ f = open(patchfile, 'rb')
ln = f.readline()
of = None
in_patch = False
beyond_msg = False
- pre_buf = ''
+ pre_buf = b''
while ln:
if not beyond_msg:
- if ln == '---\n':
+ if ln == b'---\n':
if not of:
break
in_patch = False
beyond_msg = True
- elif ln.startswith('--- '):
+ elif ln.startswith(b'--- '):
# We have a diff in the commit message
in_patch = True
if not of:
print('WARNING: %s contains a diff in its commit message, indenting to avoid failure during apply' % patchfile)
- of = open(patchfile + '.tmp', 'w')
+ of = open(patchfile + '.tmp', 'wb')
of.write(pre_buf)
- pre_buf = ''
- elif in_patch and not ln[0] in '+-@ \n\r':
+ pre_buf = b''
+ elif in_patch and not ln[0] in b'+-@ \n\r':
in_patch = False
if of:
if in_patch:
- of.write(' ' + ln)
+ of.write(b' ' + ln)
else:
of.write(ln)
else:
@@ -516,7 +520,7 @@ def check_patch(patchfile):
def drop_to_shell(workdir=None):
if not sys.stdin.isatty():
- print "Not a TTY so can't drop to shell for resolution, exiting."
+ print("Not a TTY so can't drop to shell for resolution, exiting.")
return False
shell = os.environ.get('SHELL', 'bash')
@@ -526,7 +530,7 @@ def drop_to_shell(workdir=None):
' exit 1 -- abort\n' % shell);
ret = subprocess.call([shell], cwd=workdir)
if ret != 0:
- print "Aborting"
+ print("Aborting")
return False
else:
return True
@@ -610,8 +614,12 @@ def action_pull(conf, args):
def action_update(conf, args):
"""
update the component repos
- generate the patch list
- apply the generated patches
+ either:
+ generate the patch list
+ apply the generated patches
+ or:
+      re-create the entire component history and merge it
+ into the current branch with a merge commit
"""
components = [arg.split(':')[0] for arg in args[1:]]
revisions = {}
@@ -624,10 +632,22 @@ def action_update(conf, args):
# make sure combo repo is clean
check_repo_clean(os.getcwd())
- import uuid
- patch_dir = "patch-%s" % uuid.uuid4()
- if not os.path.exists(patch_dir):
- os.mkdir(patch_dir)
+ # Check whether we keep the component histories. Must be
+ # set either via --history command line parameter or consistently
+ # in combo-layer.conf. Mixing modes is (currently, and probably
+ # permanently because it would be complicated) not supported.
+ if conf.history:
+ history = True
+ else:
+ history = None
+ for name in repos:
+ repo = conf.repos[name]
+ repo_history = repo.get('history', False)
+ if history is None:
+ history = repo_history
+ elif history != repo_history:
+ logger.error("'history' property is set inconsistently")
+ sys.exit(1)
# Step 1: update the component repos
if conf.nopull:
@@ -635,6 +655,17 @@ def action_update(conf, args):
else:
action_pull(conf, ['arg0'] + components)
+ if history:
+ update_with_history(conf, components, revisions, repos)
+ else:
+ update_with_patches(conf, components, revisions, repos)
+
+def update_with_patches(conf, components, revisions, repos):
+ import uuid
+ patch_dir = "patch-%s" % uuid.uuid4()
+ if not os.path.exists(patch_dir):
+ os.mkdir(patch_dir)
+
for name in repos:
revision = revisions.get(name, None)
repo = conf.repos[name]
@@ -711,6 +742,21 @@ def action_update(conf, args):
runcmd("rm -rf %s" % patch_dir)
# Step 7: commit the updated config file if it's being tracked
+ commit_conf_file(conf, components)
+
+def conf_commit_msg(conf, components):
+ # create the "components" string
+ component_str = "all components"
+ if len(components) > 0:
+ # otherwise tell which components were actually changed
+ component_str = ", ".join(components)
+
+ # expand the template with known values
+ template = Template(conf.commit_msg_template)
+ msg = template.substitute(components = component_str)
+ return msg
+
+def commit_conf_file(conf, components, commit=True):
relpath = os.path.relpath(conf.conffile)
try:
output = runcmd("git status --porcelain %s" % relpath, printerr=False)
@@ -718,23 +764,15 @@ def action_update(conf, args):
# Outside the repository
output = None
if output:
- logger.info("Committing updated configuration file")
if output.lstrip().startswith("M"):
-
- # create the "components" string
- component_str = "all components"
- if len(components) > 0:
- # otherwise tell which components were actually changed
- component_str = ", ".join(components)
-
- # expand the template with known values
- template = Template(conf.commit_msg_template)
- raw_msg = template.substitute(components = component_str)
-
- # sanitize the string before using it in command line
- msg = raw_msg.replace('"', '\\"')
-
- runcmd('git commit -m "%s" %s' % (msg, relpath))
+ logger.info("Committing updated configuration file")
+ if commit:
+ msg = conf_commit_msg(conf, components)
+ runcmd('git commit -m'.split() + [msg, relpath])
+ else:
+ runcmd('git add %s' % relpath)
+ return True
+ return False
def apply_patchlist(conf, repos):
"""
@@ -852,6 +890,418 @@ def action_splitpatch(conf, args):
else:
logger.info(patch_filename)
+def update_with_history(conf, components, revisions, repos):
+ '''Update all components with full history.
+
+ Works by importing all commits reachable from a component's
+ current head revision. If those commits are rooted in an already
+ imported commit, their content gets mixed with the content of the
+ combined repo of that commit (new or modified files overwritten,
+ removed files removed).
+
+ The last commit is an artificial merge commit that merges all the
+ updated components into the combined repository.
+
+ The HEAD ref only gets updated at the very end. All intermediate work
+ happens in a worktree which will get garbage collected by git eventually
+ after a failure.
+ '''
+ # Remember current HEAD and what we need to add to it.
+ head = runcmd("git rev-parse HEAD").strip()
+ additional_heads = {}
+
+ # Track the mapping between original commit and commit in the
+ # combined repo. We do not have to distinguish between components,
+ # because commit hashes are different anyway. Often we can
+ # skip find_revs() entirely (for example, when all new commits
+ # are derived from the last imported revision).
+ #
+ # Using "head" (typically the merge commit) instead of the actual
+ # commit for the component leads to a nicer history in the combined
+ # repo.
+ old2new_revs = {}
+ for name in repos:
+ repo = conf.repos[name]
+ revision = repo['last_revision']
+ if revision:
+ old2new_revs[revision] = head
+
+ def add_p(parents):
+ '''Insert -p before each entry.'''
+ parameters = []
+ for p in parents:
+ parameters.append('-p')
+ parameters.append(p)
+ return parameters
+
+ # Do all intermediate work with a separate work dir and index,
+ # chosen via env variables (can't use "git worktree", it is too
+ # new). This is useful (no changes to current work tree unless the
+ # update succeeds) and required (otherwise we end up temporarily
+ # removing the combo-layer hooks that we currently use when
+ # importing a new component).
+ #
+ # Not cleaned up after a failure at the moment.
+ wdir = os.path.join(os.getcwd(), ".git", "combo-layer")
+ windex = wdir + ".index"
+ if os.path.isdir(wdir):
+ shutil.rmtree(wdir)
+ os.mkdir(wdir)
+ wenv = copy.deepcopy(os.environ)
+ wenv["GIT_WORK_TREE"] = wdir
+ wenv["GIT_INDEX_FILE"] = windex
+ # This one turned out to be needed in practice.
+ wenv["GIT_OBJECT_DIRECTORY"] = os.path.join(os.getcwd(), ".git", "objects")
+ wargs = {"destdir": wdir, "env": wenv}
+
+ for name in repos:
+ revision = revisions.get(name, None)
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ dest_dir = repo['dest_dir']
+ branch = repo.get('branch', "master")
+ hook = repo.get('hook', None)
+ largs = {"destdir": ldir, "env": None}
+ file_include = repo.get('file_filter', '').split()
+ file_include.sort() # make sure that short entries like '.' come first.
+ file_exclude = repo.get('file_exclude', '').split()
+
+ def include_file(file):
+ if not file_include:
+ # No explicit filter set, include file.
+ return True
+ for filter in file_include:
+ if filter == '.':
+ # Another special case: include current directory and thus all files.
+ return True
+ if os.path.commonprefix((filter, file)) == filter:
+ # Included in directory or direct file match.
+ return True
+ # Check for wildcard match *with* allowing * to match /, i.e.
+ # src/*.c does match src/foobar/*.c. That's not how it is done elsewhere
+ # when passing the filtering to "git archive", but it is unclear what
+ # the intended semantic is (the comment on file_exclude that "append a * wildcard
+        # at the end" to match the full content of a directory implies that
+ # slashes are indeed not special), so here we simply do what's easy to
+ # implement in Python.
+ logger.debug('fnmatch(%s, %s)' % (file, filter))
+ if fnmatch.fnmatchcase(file, filter):
+ return True
+ return False
+
+ def exclude_file(file):
+ for filter in file_exclude:
+ if fnmatch.fnmatchcase(file, filter):
+ return True
+ return False
+
+ def file_filter(files):
+ '''Clean up file list so that only included files remain.'''
+ index = 0
+ while index < len(files):
+ file = files[index]
+ if not include_file(file) or exclude_file(file):
+ del files[index]
+ else:
+ index += 1
+
+
+ # Generate the revision list.
+ logger.info("Analyzing commits from %s..." % name)
+ top_revision = revision or branch
+ if not check_rev_branch(name, ldir, top_revision, branch):
+ sys.exit(1)
+
+ last_revision = repo['last_revision']
+ rev_list_args = "--full-history --sparse --topo-order --reverse"
+ if not last_revision:
+ logger.info("Warning: last_revision of component %s is not set, starting from the first commit" % name)
+ rev_list_args = rev_list_args + ' ' + top_revision
+ else:
+ if not check_rev_branch(name, ldir, last_revision, branch):
+ sys.exit(1)
+ rev_list_args = "%s %s..%s" % (rev_list_args, last_revision, top_revision)
+
+ # By definition, the current HEAD contains the latest imported
+ # commit of each component. We use that as initial mapping even
+ # though the commits do not match exactly because
+ # a) it always works (in contrast to find_revs, which relies on special
+ # commit messages)
+ # b) it is faster than find_revs, which will only be called on demand
+ # and can be skipped entirely in most cases
+ # c) last but not least, the combined history looks nicer when all
+ # new commits are rooted in the same merge commit
+ old2new_revs[last_revision] = head
+
+ # We care about all commits (--full-history and --sparse) and
+    # we want to reconstruct the topology and thus do not care
+ # about ordering by time (--topo-order). We ask for the ones
+ # we need to import first to be listed first (--reverse).
+ revs = runcmd("git rev-list %s" % rev_list_args, **largs).split()
+ logger.debug("To be imported: %s" % revs)
+ # Now 'revs' contains all revisions reachable from the top revision.
+ # All revisions derived from the 'last_revision' definitely are new,
+ # whereas the others may or may not have been imported before. For
+ # a linear history in the component, that second set will be empty.
+ # To distinguish between them, we also get the shorter list
+ # of revisions starting at the ancestor.
+ if last_revision:
+ ancestor_revs = runcmd("git rev-list --ancestry-path %s" % rev_list_args, **largs).split()
+ else:
+ ancestor_revs = []
+ logger.debug("Ancestors: %s" % ancestor_revs)
+
+ # Now import each revision.
+ logger.info("Importing commits from %s..." % name)
+ def import_rev(rev):
+ global scanned_revs
+
+ # If it is part of the new commits, we definitely need
+ # to import it. Otherwise we need to check, we might have
+ # imported it before. If it was imported and we merely
+ # fail to find it because commit messages did not track
+ # the mapping, then we end up importing it again. So
+ # combined repos using "updating with history" really should
+ # enable the "From ... rev:" commit header modifications.
+ if rev not in ancestor_revs and rev not in old2new_revs and not scanned_revs:
+ logger.debug("Revision %s triggers log analysis." % rev)
+ find_revs(old2new_revs, head)
+ scanned_revs = True
+ new_rev = old2new_revs.get(rev, None)
+ if new_rev:
+ return new_rev
+
+ # If the commit is not in the original list of revisions
+ # to be imported, then it must be a parent of one of those
+ # commits and it was skipped during earlier imports or not
+ # found. Importing such merge commits leads to very ugly
+ # history (long cascade of merge commits which all point
+ # to to older commits) when switching from "update via
+            # to older commits) when switching from "update via
+ #
+ # We can avoid importing merge commits if all non-merge commits
+ # reachable from it were already imported. In that case we
+ # can root the new commits in the current head revision.
+ def is_imported(prev):
+ parents = runcmd("git show --no-patch --pretty=format:%P " + prev, **largs).split()
+ if len(parents) > 1:
+ for p in parents:
+ if not is_imported(p):
+ logger.debug("Must import %s because %s is not imported." % (rev, p))
+ return False
+ return True
+ elif prev in old2new_revs:
+ return True
+ else:
+ logger.debug("Must import %s because %s is not imported." % (rev, prev))
+ return False
+ if rev not in revs and is_imported(rev):
+ old2new_revs[rev] = head
+ return head
+
+ # Need to import rev. Collect some information about it.
+ logger.debug("Importing %s" % rev)
+ (parents, author_name, author_email, author_timestamp, body) = \
+ runcmd("git show --no-patch --pretty=format:%P%x00%an%x00%ae%x00%at%x00%B " + rev, **largs).split(chr(0))
+ parents = parents.split()
+ if parents:
+ # Arbitrarily pick the first parent as base. It may or may not have
+ # been imported before. For example, if the parent is a merge commit
+ # and previously the combined repository used patching as update
+ # method, then the actual merge commit parent never was imported.
+            # To cover this, we recursively import parents.
+ parent = parents[0]
+ new_parent = import_rev(parent)
+ # Clean index and working tree. TODO: can we combine this and the
+ # next into one command with less file IO?
+ # "git reset --hard" does not work, it changes HEAD of the parent
+ # repo, which we wanted to avoid. Probably need to keep
+ # track of the rev that corresponds to the index and use apply_commit().
+ runcmd("git rm -q --ignore-unmatch -rf .", **wargs)
+ # Update index and working tree to match the parent.
+ runcmd("git checkout -q -f %s ." % new_parent, **wargs)
+ else:
+ parent = None
+ # Clean index and working tree.
+ runcmd("git rm -q --ignore-unmatch -rf .", **wargs)
+
+ # Modify index and working tree such that it mirrors the commit.
+ apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=file_filter)
+
+ # Now commit.
+ new_tree = runcmd("git write-tree", **wargs).strip()
+ env = copy.deepcopy(wenv)
+ env['GIT_AUTHOR_NAME'] = author_name
+ env['GIT_AUTHOR_EMAIL'] = author_email
+ env['GIT_AUTHOR_DATE'] = author_timestamp
+ if hook:
+ # Need to turn the verbatim commit message into something resembling a patch header
+ # for the hook.
+            with tempfile.NamedTemporaryFile(mode='w', delete=False) as patch:
+ patch.write('Subject: [PATCH] ')
+ patch.write(body)
+ patch.write('\n---\n')
+ patch.close()
+ runcmd([hook, patch.name, rev, name])
+ with open(patch.name) as f:
+ body = f.read()[len('Subject: [PATCH] '):][:-len('\n---\n')]
+
+ # We can skip non-merge commits that did not change any files. Those are typically
+ # the result of file filtering, although they could also have been introduced
+ # intentionally upstream, in which case we drop some information here.
+ if len(parents) == 1:
+ parent_rev = import_rev(parents[0])
+ old_tree = runcmd("git show -s --pretty=format:%T " + parent_rev, **wargs).strip()
+ commit = old_tree != new_tree
+ if not commit:
+ new_rev = parent_rev
+ else:
+ commit = True
+ if commit:
+ new_rev = runcmd("git commit-tree".split() + add_p([import_rev(p) for p in parents]) +
+ ["-m", body, new_tree],
+ env=env).strip()
+ old2new_revs[rev] = new_rev
+
+ return new_rev
+
+ if revs:
+ for rev in revs:
+ import_rev(rev)
+ # Remember how to update our current head. New components get added,
+ # updated components get the delta between current head and the updated component
+ # applied.
+ additional_heads[old2new_revs[revs[-1]]] = head if repo['last_revision'] else None
+ repo['last_revision'] = revs[-1]
+
+ # Now construct the final merge commit. We create the tree by
+ # starting with the head and applying the changes from each
+ # components imported head revision.
+ if additional_heads:
+ runcmd("git reset --hard", **wargs)
+ for rev, base in additional_heads.items():
+ apply_commit(base, rev, wargs, wargs, None)
+
+ # Commit with all component branches as parents as well as the previous head.
+ logger.info("Writing final merge commit...")
+ msg = conf_commit_msg(conf, components)
+ new_tree = runcmd("git write-tree", **wargs).strip()
+ new_rev = runcmd("git commit-tree".split() +
+ add_p([head] + list(additional_heads.keys())) +
+ ["-m", msg, new_tree],
+ **wargs).strip()
+ # And done! This is the first time we change the HEAD in the actual work tree.
+ runcmd("git reset --hard %s" % new_rev)
+
+ # Update and stage the (potentially modified)
+ # combo-layer.conf, but do not commit separately.
+ for name in repos:
+ repo = conf.repos[name]
+ rev = repo['last_revision']
+ conf.update(name, "last_revision", rev)
+ if commit_conf_file(conf, components, False):
+ # Must augment the previous commit.
+ runcmd("git commit --amend -C HEAD")
+
+
+scanned_revs = False
+def find_revs(old2new, head):
+ '''Construct mapping from original commit hash to commit hash in
+ combined repo by looking at the commit messages. Depends on the
+ "From ... rev: ..." convention.'''
+ logger.info("Analyzing log messages to find previously imported commits...")
+ num_known = len(old2new)
+ log = runcmd("git log --grep='From .* rev: [a-fA-F0-9][a-fA-F0-9]*' --pretty=format:%H%x00%B%x00 " + head).split(chr(0))
+ regex = re.compile(r'From .* rev: ([a-fA-F0-9]+)')
+ for new_rev, body in zip(*[iter(log)]* 2):
+        # Use the last one, in the unlikely case that there is more than one.
+ rev = regex.findall(body)[-1]
+ if rev not in old2new:
+ old2new[rev] = new_rev.strip()
+ logger.info("Found %d additional commits, leading to: %s" % (len(old2new) - num_known, old2new))
+
+
+def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
+ '''Compare revision against parent, remove files deleted in the
+ commit, re-write new or modified ones. Moves them into dest_dir.
+ Optionally filters files.
+ '''
+ if not dest_dir:
+ dest_dir = "."
+    # -r recurses into sub-directories, giving the full overview of
+    # what changed. We do not care about copy/edits or renames, so we
+    # can disable those with --no-renames (but we still parse them,
+    # because it was not clear from the git documentation whether C and M
+    # lines can still occur).
+ logger.debug("Applying changes between %s and %s in %s" % (parent, rev, largs["destdir"]))
+ delete = []
+ update = []
+ if parent:
+ # Apply delta.
+ changes = runcmd("git diff-tree --no-commit-id --no-renames --name-status -r --raw -z %s %s" % (parent, rev), **largs).split(chr(0))
+ for status, name in zip(*[iter(changes)]*2):
+ if status[0] in "ACMRT":
+ update.append(name)
+ elif status[0] in "D":
+ delete.append(name)
+ else:
+ logger.error("Unknown status %s of file %s in revision %s" % (status, name, rev))
+ sys.exit(1)
+ else:
+ # Copy all files.
+ update.extend(runcmd("git ls-tree -r --name-only -z %s" % rev, **largs).split(chr(0)))
+
+    # Include/exclude files as defined in the component config.
+ # Both updated and deleted file lists get filtered, because it might happen
+ # that a file gets excluded, pulled from a different component, and then the
+ # excluded file gets deleted. In that case we must keep the copy.
+ if file_filter:
+ file_filter(update)
+ file_filter(delete)
+
+    # We export into a tar archive here and extract with tar because it is simple (no
+    # need to implement file and symlink writing ourselves) and gives us some degree
+    # of parallel I/O. The downside is that we have to pass the list of files via
+    # command line parameters - hopefully there will never be too many at once.
+ if update:
+ target = os.path.join(wargs["destdir"], dest_dir)
+ if not os.path.isdir(target):
+ os.makedirs(target)
+ quoted_target = pipes.quote(target)
+ # os.sysconf('SC_ARG_MAX') is lying: running a command with
+ # string length 629343 already failed with "Argument list too
+ # long" although SC_ARG_MAX = 2097152. "man execve" explains
+ # the limitations, but those are pretty complicated. So here
+ # we just hard-code a fixed value which is more likely to work.
+ max_cmdsize = 64 * 1024
+ while update:
+ quoted_args = []
+ unquoted_args = []
+ cmdsize = 100 + len(quoted_target)
+ while update:
+ quoted_next = pipes.quote(update[0])
+ size_next = len(quoted_next) + len(dest_dir) + 1
+ logger.debug('cmdline length %d + %d < %d?' % (cmdsize, size_next, os.sysconf('SC_ARG_MAX')))
+ if cmdsize + size_next < max_cmdsize:
+ quoted_args.append(quoted_next)
+ unquoted_args.append(update.pop(0))
+ cmdsize += size_next
+ else:
+ logger.debug('Breaking the cmdline at length %d' % cmdsize)
+ break
+ logger.debug('Final cmdline length %d / %d' % (cmdsize, os.sysconf('SC_ARG_MAX')))
+ cmd = "git archive %s %s | tar -C %s -xf -" % (rev, ' '.join(quoted_args), quoted_target)
+ logger.debug('First cmdline length %d' % len(cmd))
+ runcmd(cmd, **largs)
+ cmd = "git add -f".split() + [os.path.join(dest_dir, x) for x in unquoted_args]
+ logger.debug('Second cmdline length %d' % reduce(lambda x, y: x + len(y), cmd, 0))
+ runcmd(cmd, **wargs)
+    if delete:
+        # dest_dir is guaranteed to be set at this point, so prefix each path with it.
+        runcmd("git rm -f --ignore-unmatch".split() + [os.path.join(dest_dir, x) for x in delete], **wargs)
+
def action_error(conf, args):
logger.info("invalid action %s" % args[0])
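The apply_commit() hunk above leans on two idioms worth spelling out: zip(*[iter(seq)]*2), which pairs up git's flat, NUL-separated --name-status output, and the manual batching of quoted file names under a hard-coded 64 KiB budget rather than trusting SC_ARG_MAX. A minimal, self-contained sketch of both, using hypothetical sample data and shlex.quote (the Python 3 counterpart of the script's pipes.quote):

    import shlex

    # Hypothetical stand-in for "git diff-tree ... -z" output: a flat,
    # NUL-separated stream of alternating status and file name fields.
    raw = "M\0foo.c\0A\0sub dir/new.c\0D\0bar.h\0"
    fields = raw.split("\0")

    # Both zip() arguments are the *same* iterator, so consecutive fields
    # pair up; the trailing empty string from split() is dropped.
    changed = list(zip(*[iter(fields)] * 2))   # [('M', 'foo.c'), ...]

    # Batch shell-quoted names under a fixed byte budget, mirroring the
    # max_cmdsize loop above.
    def batches(names, budget=64 * 1024, overhead=100):
        batch, size = [], overhead
        for name in names:
            quoted = shlex.quote(name)
            if batch and size + len(quoted) + 1 > budget:
                yield batch
                batch, size = [], overhead
            batch.append(quoted)
            size += len(quoted) + 1
        if batch:
            yield batch

    for batch in batches([name for status, name in changed if status != 'D']):
        print("git archive <rev> %s | tar -x" % " ".join(batch))

Each emitted command stays comfortably below the kernel's argument-size limits no matter how many files the commit touched.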
diff --git a/import-layers/yocto-poky/scripts/contrib/bbvars.py b/import-layers/yocto-poky/scripts/contrib/bbvars.py
index 0896d6444..d8d059477 100755
--- a/import-layers/yocto-poky/scripts/contrib/bbvars.py
+++ b/import-layers/yocto-poky/scripts/contrib/bbvars.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -24,12 +24,12 @@ import os.path
import re
def usage():
- print 'Usage: %s -d FILENAME [-d FILENAME]* -m METADIR [-m MATADIR]*' % os.path.basename(sys.argv[0])
- print ' -d FILENAME documentation file to search'
- print ' -h, --help display this help and exit'
- print ' -m METADIR meta directory to search for recipes'
- print ' -t FILENAME documentation config file (for doc tags)'
- print ' -T Only display variables with doc tags (requires -t)'
+    print('Usage: %s -d FILENAME [-d FILENAME]* -m METADIR [-m METADIR]*' % os.path.basename(sys.argv[0]))
+ print(' -d FILENAME documentation file to search')
+ print(' -h, --help display this help and exit')
+ print(' -m METADIR meta directory to search for recipes')
+ print(' -t FILENAME documentation config file (for doc tags)')
+ print(' -T Only display variables with doc tags (requires -t)')
def recipe_bbvars(recipe):
''' Return a unique set of every bbvar encountered in the recipe '''
@@ -37,9 +37,9 @@ def recipe_bbvars(recipe):
vset = set()
try:
r = open(recipe)
- except IOError as (errno, strerror):
- print 'WARNING: Failed to open recipe ', recipe
- print strerror
+ except IOError as err:
+ print('WARNING: Failed to open recipe ', recipe)
+ print(err.args[1])
for line in r:
# Strip any comments from the line
@@ -59,8 +59,8 @@ def collect_bbvars(metadir):
for root,dirs,files in os.walk(metadir):
for name in files:
if name.find(".bb") >= 0:
- for key in recipe_bbvars(os.path.join(root,name)).iterkeys():
- if bbvars.has_key(key):
+ for key in recipe_bbvars(os.path.join(root,name)).keys():
+ if key in bbvars:
bbvars[key] = bbvars[key] + 1
else:
bbvars[key] = 1
@@ -71,9 +71,9 @@ def bbvar_is_documented(var, docfiles):
for doc in docfiles:
try:
f = open(doc)
- except IOError as (errno, strerror):
- print 'WARNING: Failed to open doc ', doc
- print strerror
+ except IOError as err:
+ print('WARNING: Failed to open doc ', doc)
+ print(err.args[1])
for line in f:
if prog.match(line):
return True
@@ -87,8 +87,8 @@ def bbvar_doctag(var, docconf):
try:
f = open(docconf)
- except IOError as (errno, strerror):
- return strerror
+ except IOError as err:
+ return err.args[1]
for line in f:
m = prog.search(line)
@@ -109,8 +109,8 @@ def main():
# Collect and validate input
try:
opts, args = getopt.getopt(sys.argv[1:], "d:hm:t:T", ["help"])
- except getopt.GetoptError, err:
- print '%s' % str(err)
+ except getopt.GetoptError as err:
+ print('%s' % str(err))
usage()
sys.exit(2)
@@ -122,13 +122,13 @@ def main():
if os.path.isfile(a):
docfiles.append(a)
else:
- print 'ERROR: documentation file %s is not a regular file' % (a)
+ print('ERROR: documentation file %s is not a regular file' % a)
sys.exit(3)
elif o == '-m':
if os.path.isdir(a):
metadirs.append(a)
else:
- print 'ERROR: meta directory %s is not a directory' % (a)
+ print('ERROR: meta directory %s is not a directory' % a)
sys.exit(4)
elif o == "-t":
if os.path.isfile(a):
@@ -139,31 +139,31 @@ def main():
assert False, "unhandled option"
if len(docfiles) == 0:
- print 'ERROR: no docfile specified'
+ print('ERROR: no docfile specified')
usage()
sys.exit(5)
if len(metadirs) == 0:
- print 'ERROR: no metadir specified'
+ print('ERROR: no metadir specified')
usage()
sys.exit(6)
if onlydoctags and docconf == "":
- print 'ERROR: no docconf specified'
+ print('ERROR: no docconf specified')
usage()
sys.exit(7)
# Collect all the variable names from the recipes in the metadirs
for m in metadirs:
- for key,cnt in collect_bbvars(m).iteritems():
- if bbvars.has_key(key):
+ for key,cnt in collect_bbvars(m).items():
+ if key in bbvars:
bbvars[key] = bbvars[key] + cnt
else:
bbvars[key] = cnt
# Check each var for documentation
varlen = 0
- for v in bbvars.iterkeys():
+ for v in bbvars.keys():
if len(v) > varlen:
varlen = len(v)
if not bbvar_is_documented(v, docfiles):
@@ -172,14 +172,14 @@ def main():
varlen = varlen + 1
# Report all undocumented variables
- print 'Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars))
+ print('Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars)))
header = '%s%s%s' % (str("VARIABLE").ljust(varlen), str("COUNT").ljust(6), str("DOCTAG").ljust(7))
- print header
- print str("").ljust(len(header), '=')
+ print(header)
+ print(str("").ljust(len(header), '='))
for v in undocumented:
doctag = bbvar_doctag(v, docconf)
if not onlydoctags or not doctag == "":
- print '%s%s%s' % (v.ljust(varlen), str(bbvars[v]).ljust(6), doctag)
+ print('%s%s%s' % (v.ljust(varlen), str(bbvars[v]).ljust(6), doctag))
if __name__ == "__main__":
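The bbvars changes above are a catalogue of the mechanical Python 2 to 3 conversions repeated throughout this series: print becomes a function, tuple-unpacking exception clauses become "except E as err", and has_key()/iterkeys()/iteritems() give way to the in operator and items(). A condensed sketch of the new forms (the Python 2 spellings survive only as comments, since they are no longer valid syntax):

    counts = {'SRC_URI': 3}

    # py2: print 'found %d' % n
    print('found %d' % counts['SRC_URI'])

    # py2: except IOError as (errno, strerror)
    try:
        open('/nonexistent')
    except IOError as err:
        print('WARNING:', err.args[1])   # unpack args explicitly

    # py2: counts.has_key(key) / counts.iteritems()
    for key, cnt in counts.items():
        assert key in counts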
diff --git a/import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh b/import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh
new file mode 100755
index 000000000..e03ea978b
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+#
+# Build performance test script wrapper
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+#
+# This script is a simple wrapper around the actual build performance tester
+# script. This script initializes the build environment, runs
+# oe-build-perf-test and archives the results.
+
+script=`basename $0`
+archive_dir=~/perf-results/archives
+
+usage () {
+cat << EOF
+Usage: $script [-h] [-c COMMITISH] [-C GIT_REPO]
+
+Optional arguments:
+ -h show this help and exit.
+ -a ARCHIVE_DIR archive results tarball here, give an empty string to
+ disable tarball archiving (default: $archive_dir)
+ -c COMMITISH test (checkout) this commit
+ -C GIT_REPO commit results into Git
+ -w WORK_DIR work dir for this script
+ (default: GIT_TOP_DIR/build-perf-test)
+EOF
+}
+
+
+# Parse command line arguments
+commitish=""
+while getopts "ha:c:C:w:" opt; do
+ case $opt in
+ h) usage
+ exit 0
+ ;;
+ a) archive_dir=`realpath "$OPTARG"`
+ ;;
+ c) commitish=$OPTARG
+ ;;
+ C) results_repo=`realpath "$OPTARG"`
+ commit_results=("--commit-results" "$results_repo")
+ ;;
+ w) base_dir=`realpath "$OPTARG"`
+ ;;
+ *) usage
+ exit 1
+ ;;
+ esac
+done
+
+# Check positional args
+shift "$((OPTIND - 1))"
+if [ $# -ne 0 ]; then
+ echo "ERROR: No positional args are accepted."
+ usage
+ exit 1
+fi
+
+echo "Running on `uname -n`"
+if ! git_topdir=$(git rev-parse --show-toplevel); then
+    echo "The current working dir doesn't seem to be a git clone. Please cd into one before running `basename $0`"
+ exit 1
+fi
+
+cd "$git_topdir"
+
+if [ -n "$commitish" ]; then
+ # Checkout correct revision
+ echo "Checking out $commitish"
+ git fetch &> /dev/null
+ git checkout HEAD^0 &> /dev/null
+ git branch -D $commitish &> /dev/null
+ if ! git checkout -f $commitish &> /dev/null; then
+ echo "Git checkout failed"
+ exit 1
+ fi
+fi
+
+# Setup build environment
+if [ -z "$base_dir" ]; then
+ base_dir="$git_topdir/build-perf-test"
+fi
+echo "Using working dir $base_dir"
+
+timestamp=`date "+%Y%m%d%H%M%S"`
+git_rev=$(git rev-parse --short HEAD) || exit 1
+build_dir="$base_dir/build-$git_rev-$timestamp"
+results_dir="$base_dir/results-$git_rev-$timestamp"
+globalres_log="$base_dir/globalres.log"
+machine="qemux86"
+
+mkdir -p "$base_dir"
+source ./oe-init-build-env $build_dir >/dev/null || exit 1
+
+# Additional config
+auto_conf="$build_dir/conf/auto.conf"
+echo "MACHINE = \"$machine\"" > "$auto_conf"
+echo 'BB_NUMBER_THREADS = "8"' >> "$auto_conf"
+echo 'PARALLEL_MAKE = "-j 8"' >> "$auto_conf"
+echo "DL_DIR = \"$base_dir/downloads\"" >> "$auto_conf"
+# Disabling network sanity check slightly reduces the variance of timing results
+echo 'CONNECTIVITY_CHECK_URIS = ""' >> "$auto_conf"
+# Possibility to define extra settings
+if [ -f "$base_dir/auto.conf.extra" ]; then
+ cat "$base_dir/auto.conf.extra" >> "$auto_conf"
+fi
+
+# Run actual test script
+oe-build-perf-test --out-dir "$results_dir" \
+ --globalres-file "$globalres_log" \
+ --lock-file "$base_dir/oe-build-perf.lock" \
+ "${commit_results[@]}" \
+ --commit-results-branch "{tester_host}/{git_branch}/$machine" \
+ --commit-results-tag "{tester_host}/{git_branch}/$machine/{git_commit_count}-g{git_commit}/{tag_num}"
+
+case $? in
+ 1) echo "ERROR: oe-build-perf-test script failed!"
+ exit 1
+ ;;
+ 2) echo "NOTE: some tests failed!"
+ ;;
+esac
+
+echo -ne "\n\n-----------------\n"
+echo "Global results file:"
+echo -ne "\n"
+
+cat "$globalres_log"
+
+if [ -n "$archive_dir" ]; then
+ echo -ne "\n\n-----------------\n"
+ echo "Archiving results in $archive_dir"
+ mkdir -p "$archive_dir"
+ results_basename=`basename "$results_dir"`
+ results_dirname=`dirname "$results_dir"`
+ tar -czf "$archive_dir/`uname -n`-${results_basename}.tar.gz" -C "$results_dirname" "$results_basename"
+fi
+
+rm -rf "$build_dir"
+rm -rf "$results_dir"
+
+echo "DONE"
diff --git a/import-layers/yocto-poky/scripts/contrib/ddimage b/import-layers/yocto-poky/scripts/contrib/ddimage
index a503f11d0..ab929957a 100755
--- a/import-layers/yocto-poky/scripts/contrib/ddimage
+++ b/import-layers/yocto-poky/scripts/contrib/ddimage
@@ -100,5 +100,9 @@ if [ "$RESPONSE" != "y" ]; then
fi
echo "Writing image..."
-dd if="$IMAGE" of="$DEVICE" bs="$BLOCKSIZE"
+if which pv >/dev/null 2>&1; then
+ pv "$IMAGE" | dd of="$DEVICE" bs="$BLOCKSIZE"
+else
+ dd if="$IMAGE" of="$DEVICE" bs="$BLOCKSIZE"
+fi
sync
diff --git a/import-layers/yocto-poky/scripts/contrib/devtool-stress.py b/import-layers/yocto-poky/scripts/contrib/devtool-stress.py
index 8cf92ca2f..d555c51a6 100755
--- a/import-layers/yocto-poky/scripts/contrib/devtool-stress.py
+++ b/import-layers/yocto-poky/scripts/contrib/devtool-stress.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# devtool stress tester
#
@@ -43,15 +43,15 @@ def select_recipes(args):
tinfoil = bb.tinfoil.Tinfoil()
tinfoil.prepare(False)
- pkg_pn = tinfoil.cooker.recipecache.pkg_pn
- (latest_versions, preferred_versions) = bb.providers.findProviders(tinfoil.config_data, tinfoil.cooker.recipecache, pkg_pn)
+ pkg_pn = tinfoil.cooker.recipecaches[''].pkg_pn
+ (latest_versions, preferred_versions) = bb.providers.findProviders(tinfoil.config_data, tinfoil.cooker.recipecaches[''], pkg_pn)
skip_classes = args.skip_classes.split(',')
recipelist = []
for pn in sorted(pkg_pn):
pref = preferred_versions[pn]
- inherits = [os.path.splitext(os.path.basename(f))[0] for f in tinfoil.cooker.recipecache.inherits[pref[1]]]
+ inherits = [os.path.splitext(os.path.basename(f))[0] for f in tinfoil.cooker.recipecaches[''].inherits[pref[1]]]
for cls in skip_classes:
if cls in inherits:
break
@@ -121,14 +121,18 @@ def stress_extract(args):
sys.stdout.write('Testing %s ' % (pn + ' ').ljust(40, '.'))
sys.stdout.flush()
failed = False
+ skipped = None
srctree = os.path.join(tmpdir, pn)
try:
bb.process.run('devtool extract %s %s' % (pn, srctree))
- except bb.process.CmdError as exc:
- failed = True
- with open('stress_%s_extract.log' % pn, 'w') as f:
- f.write(str(exc))
+ except bb.process.ExecutionError as exc:
+ if exc.exitcode == 4:
+ skipped = 'incompatible'
+ else:
+ failed = True
+ with open('stress_%s_extract.log' % pn, 'w') as f:
+ f.write(str(exc))
if os.path.exists(srctree):
shutil.rmtree(srctree)
@@ -136,6 +140,8 @@ def stress_extract(args):
if failed:
print('failed')
failures += 1
+ elif skipped:
+ print('skipped (%s)' % skipped)
else:
print('ok')
except KeyboardInterrupt:
@@ -162,29 +168,34 @@ def stress_modify(args):
sys.stdout.flush()
failed = False
reset = True
+ skipped = None
srctree = os.path.join(tmpdir, pn)
try:
bb.process.run('devtool modify -x %s %s' % (pn, srctree))
- except bb.process.CmdError as exc:
- with open('stress_%s_modify.log' % pn, 'w') as f:
- f.write(str(exc))
- failed = 'modify'
- reset = False
-
- if not failed:
- try:
- bb.process.run('bitbake -c install %s' % pn)
- except bb.process.CmdError as exc:
- with open('stress_%s_install.log' % pn, 'w') as f:
+ except bb.process.ExecutionError as exc:
+ if exc.exitcode == 4:
+ skipped = 'incompatible'
+ else:
+ with open('stress_%s_modify.log' % pn, 'w') as f:
f.write(str(exc))
- failed = 'build'
- if reset:
- try:
- bb.process.run('devtool reset %s' % pn)
- except bb.process.CmdError as exc:
- print('devtool reset failed: %s' % str(exc))
- break
+ failed = 'modify'
+ reset = False
+
+ if not skipped:
+ if not failed:
+ try:
+ bb.process.run('bitbake -c install %s' % pn)
+ except bb.process.CmdError as exc:
+ with open('stress_%s_install.log' % pn, 'w') as f:
+ f.write(str(exc))
+ failed = 'build'
+ if reset:
+ try:
+ bb.process.run('devtool reset %s' % pn)
+ except bb.process.CmdError as exc:
+ print('devtool reset failed: %s' % str(exc))
+ break
if os.path.exists(srctree):
shutil.rmtree(srctree)
@@ -192,6 +203,8 @@ def stress_modify(args):
if failed:
print('failed (%s)' % failed)
failures += 1
+ elif skipped:
+ print('skipped (%s)' % skipped)
else:
print('ok')
except KeyboardInterrupt:
@@ -210,9 +223,10 @@ def main():
parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
parser.add_argument('-r', '--resume-from', help='Resume from specified recipe', metavar='PN')
parser.add_argument('-o', '--only', help='Only test specified recipes (comma-separated without spaces, wildcards allowed)', metavar='PNLIST')
- parser.add_argument('-s', '--skip', help='Skip specified recipes (comma-separated without spaces, wildcards allowed)', metavar='PNLIST')
+ parser.add_argument('-s', '--skip', help='Skip specified recipes (comma-separated without spaces, wildcards allowed)', metavar='PNLIST', default='gcc-source-*,kernel-devsrc,package-index,perf,meta-world-pkgdata,glibc-locale,glibc-mtrace,glibc-scripts,os-release')
parser.add_argument('-c', '--skip-classes', help='Skip recipes inheriting specified classes (comma-separated) - default %(default)s', metavar='CLASSLIST', default='native,nativesdk,cross,cross-canadian,image,populate_sdk,meta,packagegroup')
subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
parser_modify = subparsers.add_parser('modify',
help='Run "devtool modify" followed by a build with bitbake on matching recipes',
diff --git a/import-layers/yocto-poky/scripts/contrib/graph-tool b/import-layers/yocto-poky/scripts/contrib/graph-tool
index 6dc7d337f..1df5b8c34 100755
--- a/import-layers/yocto-poky/scripts/contrib/graph-tool
+++ b/import-layers/yocto-poky/scripts/contrib/graph-tool
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Simple graph query utility
# useful for getting answers from .dot files produced by bitbake -g
@@ -30,8 +30,7 @@ def get_path_networkx(dotfile, fromnode, tonode):
print('ERROR: Please install the networkx python module')
sys.exit(1)
- graph = networkx.DiGraph(networkx.read_dot(dotfile))
-
+ graph = networkx.DiGraph(networkx.nx_pydot.read_dot(dotfile))
def node_missing(node):
import difflib
close_matches = difflib.get_close_matches(node, graph.nodes(), cutoff=0.7)
@@ -53,11 +52,11 @@ def find_paths(args, usage):
fromnode = args[1]
tonode = args[2]
- paths = list(get_path_networkx(args[0], fromnode, tonode))
- if paths:
- for path in paths:
- print ' -> '.join(path)
- else:
+
+ path = None
+ for path in get_path_networkx(args[0], fromnode, tonode):
+ print(" -> ".join(map(str, path)))
+ if not path:
print("ERROR: no path from %s to %s in graph" % (fromnode, tonode))
sys.exit(1)
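The graph-tool rewrite above swaps a materialized list for streaming iteration; the "path = None" sentinel is what still detects the no-path case once the loop ends. The same shape in isolation, with a hypothetical empty generator:

    def get_paths():          # hypothetical stand-in for get_path_networkx()
        yield from ()         # yields nothing, like a disconnected graph

    path = None
    for path in get_paths():  # body never runs when the generator is empty
        print(' -> '.join(map(str, path)))
    if not path:              # sentinel unchanged => nothing was yielded
        print('ERROR: no path found')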
diff --git a/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py b/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py
index 2f3b8b06a..389fb97f6 100755
--- a/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py
+++ b/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -37,7 +37,6 @@ if not bitbakepath:
sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
sys.exit(1)
-import bb.cache
import bb.cooker
import bb.providers
import bb.tinfoil
@@ -45,7 +44,7 @@ import bb.tinfoil
def get_fnlist(bbhandler, pkg_pn, preferred):
''' Get all recipe file names '''
if preferred:
- (latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecache, pkg_pn)
+ (latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
fn_list = []
for pn in sorted(pkg_pn):
@@ -58,11 +57,11 @@ def get_fnlist(bbhandler, pkg_pn, preferred):
def get_recipesdata(bbhandler, preferred):
''' Get data of all available recipes which have PACKAGECONFIG flags '''
- pkg_pn = bbhandler.cooker.recipecache.pkg_pn
+ pkg_pn = bbhandler.cooker.recipecaches[''].pkg_pn
data_dict = {}
for fn in get_fnlist(bbhandler, pkg_pn, preferred):
- data = bb.cache.Cache.loadDataFull(fn, bbhandler.cooker.collection.get_file_appends(fn), bbhandler.config_data)
+ data = bbhandler.parse_recipe_file(fn)
flags = data.getVarFlags("PACKAGECONFIG")
flags.pop('doc', None)
if flags:
@@ -86,7 +85,7 @@ def collect_flags(pkg_dict):
''' Collect available PACKAGECONFIG flags and all affected pkgs '''
# flag_dict = {'flag': ['pkg1', 'pkg2',...]}
flag_dict = {}
- for pkgname, flaglist in pkg_dict.iteritems():
+ for pkgname, flaglist in pkg_dict.items():
for flag in flaglist:
if flag in flag_dict:
flag_dict[flag].append(pkgname)
@@ -104,8 +103,8 @@ def display_pkgs(pkg_dict):
pkgname_len += 1
header = '%-*s%s' % (pkgname_len, str("RECIPE NAME"), str("PACKAGECONFIG FLAGS"))
- print header
- print str("").ljust(len(header), '=')
+ print(header)
+ print(str("").ljust(len(header), '='))
for pkgname in sorted(pkg_dict):
print('%-*s%s' % (pkgname_len, pkgname, ' '.join(pkg_dict[pkgname])))
@@ -115,28 +114,28 @@ def display_flags(flag_dict):
flag_len = len("PACKAGECONFIG FLAG") + 5
header = '%-*s%s' % (flag_len, str("PACKAGECONFIG FLAG"), str("RECIPE NAMES"))
- print header
- print str("").ljust(len(header), '=')
+ print(header)
+ print(str("").ljust(len(header), '='))
for flag in sorted(flag_dict):
print('%-*s%s' % (flag_len, flag, ' '.join(sorted(flag_dict[flag]))))
def display_all(data_dict):
''' Display all pkgs and PACKAGECONFIG information '''
- print str("").ljust(50, '=')
+ print(str("").ljust(50, '='))
for fn in data_dict:
print('%s' % data_dict[fn].getVar("P", True))
- print fn
+ print(fn)
packageconfig = data_dict[fn].getVar("PACKAGECONFIG", True) or ''
if packageconfig.strip() == '':
packageconfig = 'None'
print('PACKAGECONFIG %s' % packageconfig)
- for flag,flag_val in data_dict[fn].getVarFlags("PACKAGECONFIG").iteritems():
+ for flag,flag_val in data_dict[fn].getVarFlags("PACKAGECONFIG").items():
if flag == "doc":
continue
print('PACKAGECONFIG[%s] %s' % (flag, flag_val))
- print ''
+ print('')
def main():
pkg_dict = {}
@@ -160,20 +159,20 @@ def main():
options, args = parser.parse_args(sys.argv)
- bbhandler = bb.tinfoil.Tinfoil()
- bbhandler.prepare()
- print("Gathering recipe data...")
- data_dict = get_recipesdata(bbhandler, options.preferred)
-
- if options.listtype == 'flags':
- pkg_dict = collect_pkgs(data_dict)
- flag_dict = collect_flags(pkg_dict)
- display_flags(flag_dict)
- elif options.listtype == 'recipes':
- pkg_dict = collect_pkgs(data_dict)
- display_pkgs(pkg_dict)
- elif options.listtype == 'all':
- display_all(data_dict)
+ with bb.tinfoil.Tinfoil() as bbhandler:
+ bbhandler.prepare()
+ print("Gathering recipe data...")
+ data_dict = get_recipesdata(bbhandler, options.preferred)
+
+ if options.listtype == 'flags':
+ pkg_dict = collect_pkgs(data_dict)
+ flag_dict = collect_flags(pkg_dict)
+ display_flags(flag_dict)
+ elif options.listtype == 'recipes':
+ pkg_dict = collect_pkgs(data_dict)
+ display_pkgs(pkg_dict)
+ elif options.listtype == 'all':
+ display_all(data_dict)
if __name__ == "__main__":
main()
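The "with bb.tinfoil.Tinfoil() as bbhandler:" form introduced above (and in verify-homepage below) guarantees the cooker is shut down even if recipe parsing raises; where a context manager is awkward, the same effect needs try/finally, as the devtool change later in this series does. A sketch of both shapes, assuming a configured BitBake environment:

    import bb.tinfoil

    # Context-manager form: shutdown happens on any exit path.
    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=True)
        bbpath = tinfoil.config_data.getVar('BBPATH', True)

    # Equivalent explicit form.
    tinfoil = bb.tinfoil.Tinfoil()
    try:
        tinfoil.prepare(config_only=True)
        bbpath = tinfoil.config_data.getVar('BBPATH', True)
    finally:
        tinfoil.shutdown()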
diff --git a/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh b/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh
index 333284ff5..d8db3c016 100755
--- a/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh
+++ b/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh
@@ -369,8 +369,8 @@ mkswap $SWAP >$OUT 2>&1 || die "Failed to prepare swap"
# Installing to $DEVICE
#
debug "Mounting images and device in preparation for installation"
-mount -o loop $HDDIMG $HDDIMG_MNT >$OUT 2>&1 || error "Failed to mount $HDDIMG"
-mount -o loop $HDDIMG_MNT/rootfs.img $HDDIMG_ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount rootfs.img"
+mount -o ro,loop $HDDIMG $HDDIMG_MNT >$OUT 2>&1 || error "Failed to mount $HDDIMG"
+mount -o ro,loop $HDDIMG_MNT/rootfs.img $HDDIMG_ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount rootfs.img"
mount $ROOTFS $ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount $ROOTFS on $ROOTFS_MNT"
mount $BOOTFS $BOOTFS_MNT >$OUT 2>&1 || error "Failed to mount $BOOTFS on $BOOTFS_MNT"
diff --git a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py
index d93c943c6..f2ecf8d3f 100755
--- a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py
+++ b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py
@@ -97,7 +97,7 @@ class MakefileMaker:
# generate package variables
#
- for name, data in sorted(self.packages.iteritems()):
+ for name, data in sorted(self.packages.items()):
desc, deps, files = data
#
@@ -130,7 +130,7 @@ class MakefileMaker:
self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
line = 'RDEPENDS_${PN}-modules="'
- for name, data in sorted(self.packages.iteritems()):
+ for name, data in sorted(self.packages.items()):
if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
line += "%s " % name
@@ -153,7 +153,7 @@ if __name__ == "__main__":
os.unlink(sys.argv[1])
except Exception:
sys.exc_clear()
- outfile = file( sys.argv[1], "w" )
+ outfile = open( sys.argv[1], "w" )
else:
outfile = sys.stdout
diff --git a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py
index 367b4b8b4..2906cc66d 100755
--- a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py
+++ b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py
@@ -100,7 +100,7 @@ class MakefileMaker:
# generate package variables
#
- for name, data in sorted(self.packages.iteritems()):
+ for name, data in sorted(self.packages.items()):
desc, deps, files = data
#
@@ -133,7 +133,7 @@ class MakefileMaker:
self.out( 'SUMMARY_${PN}-modules="All Python modules"' )
line = 'RDEPENDS_${PN}-modules="'
- for name, data in sorted(self.packages.iteritems()):
+ for name, data in sorted(self.packages.items()):
if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
line += "%s " % name
@@ -156,7 +156,7 @@ if __name__ == "__main__":
os.unlink(sys.argv[1])
except Exception:
sys.exc_clear()
- outfile = file( sys.argv[1], "w" )
+ outfile = open( sys.argv[1], "w" )
else:
outfile = sys.stdout
@@ -167,7 +167,7 @@ if __name__ == "__main__":
#
m.addPackage( "${PN}-core", "Python interpreter and core modules", "${PN}-lang ${PN}-re ${PN}-reprlib ${PN}-codecs ${PN}-io ${PN}-math",
- "__future__.* _abcoll.* abc.* ast.* copy.* copyreg.* ConfigParser.* " +
+ "__future__.* _abcoll.* abc.* ast.* copy.* copyreg.* configparser.* " +
"genericpath.* getopt.* linecache.* new.* " +
"os.* posixpath.* struct.* " +
"warnings.* site.* stat.* " +
@@ -208,6 +208,9 @@ if __name__ == "__main__":
m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.*.so lib-dynload/audioop.*.so audiodev.* sunaudio.* sunau.* toaiff.*" )
+ m.addPackage( "${PN}-argparse", "Python command line argument parser", "${PN}-core ${PN}-codecs ${PN}-textutils",
+ "argparse.*" )
+
m.addPackage( "${PN}-asyncio", "Python Asynchronous I/O, event loop, coroutines and tasks", "${PN}-core",
"asyncio" )
@@ -217,8 +220,8 @@ if __name__ == "__main__":
m.addPackage( "${PN}-compile", "Python bytecode compilation support", "${PN}-core",
"py_compile.* compileall.*" )
- m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-codecs",
- "gzip.* zipfile.* tarfile.* lib-dynload/bz2.*.so" )
+ m.addPackage( "${PN}-compression", "Python high-level compression support", "${PN}-core ${PN}-codecs ${PN}-importlib ${PN}-threading ${PN}-shell",
+ "gzip.* zipfile.* tarfile.* lib-dynload/bz2.*.so lib-dynload/zlib.*.so" )
m.addPackage( "${PN}-crypt", "Python basic cryptographic and hashing support", "${PN}-core",
"hashlib.* md5.* sha.* lib-dynload/crypt.*.so lib-dynload/_hashlib.*.so lib-dynload/_sha256.*.so lib-dynload/_sha512.*.so" )
@@ -229,11 +232,11 @@ if __name__ == "__main__":
m.addPackage( "${PN}-curses", "Python curses support", "${PN}-core",
"curses lib-dynload/_curses.*.so lib-dynload/_curses_panel.*.so" ) # directory + low level module
- m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core",
+ m.addPackage( "${PN}-ctypes", "Python C types support", "${PN}-core ${PN}-subprocess",
"ctypes lib-dynload/_ctypes.*.so lib-dynload/_ctypes_test.*.so" ) # directory + low level module
m.addPackage( "${PN}-datetime", "Python calendar and time support", "${PN}-core ${PN}-codecs",
- "_strptime.* calendar.* lib-dynload/datetime.*.so" )
+ "_strptime.* calendar.* datetime.* lib-dynload/_datetime.*.so" )
m.addPackage( "${PN}-db", "Python file-based database support", "${PN}-core",
"anydbm.* dumbdbm.* whichdb.* dbm lib-dynload/_dbm.*.so" )
@@ -256,13 +259,16 @@ if __name__ == "__main__":
m.addPackage( "${PN}-email", "Python email support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
"imaplib.* email" ) # package
+ m.addPackage( "${PN}-enum", "Python support for enumerations", "${PN}-core",
+ "enum.*" )
+
m.addPackage( "${PN}-fcntl", "Python's fcntl interface", "${PN}-core",
"lib-dynload/fcntl.*.so" )
m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
- m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core",
+ m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core ${PN}-lang",
"importlib" )
m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core",
@@ -278,10 +284,10 @@ if __name__ == "__main__":
m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re",
"json lib-dynload/_json.*.so" ) # package
- m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core",
+ m.addPackage( "${PN}-lang", "Python low-level language support", "${PN}-core ${PN}-importlib",
"lib-dynload/_bisect.*.so lib-dynload/_collections.*.so lib-dynload/_heapq.*.so lib-dynload/_weakref.*.so lib-dynload/_functools.*.so " +
"lib-dynload/array.*.so lib-dynload/itertools.*.so lib-dynload/operator.*.so lib-dynload/parser.*.so " +
- "atexit.* bisect.* code.* codeop.* collections.* _collections_abc.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
+ "atexit.* bisect.* code.* codeop.* collections.* _collections_abc.* contextlib.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* operator.* symbol.* repr.* token.* " +
"tokenize.* traceback.* weakref.*" )
m.addPackage( "${PN}-logging", "Python logging support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
@@ -290,7 +296,7 @@ if __name__ == "__main__":
m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
"mailbox.*" )
- m.addPackage( "${PN}-math", "Python math support", "${PN}-core",
+ m.addPackage( "${PN}-math", "Python math support", "${PN}-core ${PN}-crypt",
"lib-dynload/cmath.*.so lib-dynload/math.*.so lib-dynload/_random.*.so random.* sets.*" )
m.addPackage( "${PN}-mime", "Python MIME handling APIs", "${PN}-core ${PN}-io",
@@ -336,11 +342,17 @@ if __name__ == "__main__":
m.addPackage( "${PN}-resource", "Python resource control interface", "${PN}-core",
"lib-dynload/resource.*.so" )
- m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re",
+ m.addPackage( "${PN}-selectors", "Python High-level I/O multiplexing", "${PN}-core",
+ "selectors.*" )
+
+ m.addPackage( "${PN}-shell", "Python shell-like functionality", "${PN}-core ${PN}-re ${PN}-compression",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
- m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
- "subprocess.*" )
+ m.addPackage( "${PN}-signal", "Python set handlers for asynchronous events support", "${PN}-core ${PN}-enum",
+ "signal.*" )
+
+ m.addPackage( "${PN}-subprocess", "Python subprocess support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle ${PN}-threading ${PN}-signal ${PN}-selectors",
+ "subprocess.* lib-dynload/_posixsubprocess.*.so" )
m.addPackage( "${PN}-sqlite3", "Python Sqlite3 database support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading",
"lib-dynload/_sqlite3.*.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
@@ -361,7 +373,7 @@ if __name__ == "__main__":
"test" ) # package
m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang",
- "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
+ "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* queue.*" )
m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core",
"lib-dynload/_tkinter.*.so lib-tk tkinter" ) # package
diff --git a/import-layers/yocto-poky/scripts/contrib/uncovered b/import-layers/yocto-poky/scripts/contrib/uncovered
new file mode 100755
index 000000000..a8399ad17
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/contrib/uncovered
@@ -0,0 +1,39 @@
+#!/bin/bash -eur
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Find python modules uncovered by oe-selftest
+#
+# Copyright (c) 2016, Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Author: Ed Bartosh <ed.bartosh@linux.intel.com>
+#
+
+if [ ! "$#" -eq 1 -o -t 0 ] ; then
+ echo 'Usage: coverage report | ./scripts/contrib/uncovered <dir>' 1>&2
+ exit 1
+fi
+
+path=$(readlink -ev $1)
+
+if [ ! -d "$path" ] ; then
+ echo "directory $1 doesn't exist" 1>&2
+ exit 1
+fi
+
+diff -u <(grep "$path" | grep -v '0%$' | cut -f1 -d: | sort) \
+ <(find $path | xargs file | grep 'Python script' | cut -f1 -d:| sort) | \
+ grep "^+$path" | cut -c2-
diff --git a/import-layers/yocto-poky/scripts/contrib/verify-homepage.py b/import-layers/yocto-poky/scripts/contrib/verify-homepage.py
index 265ff65d3..d39dd1d97 100755
--- a/import-layers/yocto-poky/scripts/contrib/verify-homepage.py
+++ b/import-layers/yocto-poky/scripts/contrib/verify-homepage.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# This script can be used to verify HOMEPAGE values for all recipes in
# the current configuration.
@@ -7,7 +7,7 @@
import sys
import os
import subprocess
-import urllib2
+import urllib.request
# Allow importing scripts/lib modules
@@ -33,30 +33,30 @@ def wgetHomepage(pn, homepage):
return 0
def verifyHomepage(bbhandler):
- pkg_pn = bbhandler.cooker.recipecache.pkg_pn
+ pkg_pn = bbhandler.cooker.recipecaches[''].pkg_pn
pnlist = sorted(pkg_pn)
count = 0
checked = []
for pn in pnlist:
for fn in pkg_pn[pn]:
# There's no point checking multiple BBCLASSEXTENDed variants of the same recipe
- realfn, _ = bb.cache.Cache.virtualfn2realfn(fn)
+ realfn, _, _ = bb.cache.virtualfn2realfn(fn)
if realfn in checked:
continue
- data = bb.cache.Cache.loadDataFull(realfn, bbhandler.cooker.collection.get_file_appends(realfn), bbhandler.config_data)
+ data = bbhandler.parse_recipe_file(realfn)
homepage = data.getVar("HOMEPAGE", True)
if homepage:
try:
- urllib2.urlopen(homepage, timeout=5)
+ urllib.request.urlopen(homepage, timeout=5)
except Exception:
count = count + wgetHomepage(os.path.basename(realfn), homepage)
checked.append(realfn)
return count
if __name__=='__main__':
- bbhandler = bb.tinfoil.Tinfoil()
- bbhandler.prepare()
- logger.info("Start verifying HOMEPAGE:")
- failcount = verifyHomepage(bbhandler)
- logger.info("Finished verifying HOMEPAGE.")
- logger.info("Summary: %s failed" % failcount)
+ with bb.tinfoil.Tinfoil() as bbhandler:
+ bbhandler.prepare()
+ logger.info("Start verifying HOMEPAGE:")
+ failcount = verifyHomepage(bbhandler)
+ logger.info("Finished verifying HOMEPAGE.")
+ logger.info("Summary: %s failed" % failcount)
diff --git a/import-layers/yocto-poky/scripts/cp-noerror b/import-layers/yocto-poky/scripts/cp-noerror
index 28eb90d4a..35eb211be 100755
--- a/import-layers/yocto-poky/scripts/cp-noerror
+++ b/import-layers/yocto-poky/scripts/cp-noerror
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Allow copying of $1 to $2 but if files in $1 disappear during the copy operation,
# don't error.
@@ -33,16 +33,16 @@ def copytree(src, dst, symlinks=False, ignore=None):
shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
- except shutil.Error, err:
+ except shutil.Error as err:
errors.extend(err.args[0])
- except EnvironmentError, why:
+ except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
- except OSError, why:
+ except OSError as why:
errors.extend((src, dst, str(why)))
if errors:
- raise shutil.Error, errors
+ raise shutil.Error(errors)
try:
copytree(sys.argv[1], sys.argv[2])
diff --git a/import-layers/yocto-poky/scripts/create-pull-request b/import-layers/yocto-poky/scripts/create-pull-request
index 479ad6efc..a88f35a4a 100755
--- a/import-layers/yocto-poky/scripts/create-pull-request
+++ b/import-layers/yocto-poky/scripts/create-pull-request
@@ -39,13 +39,15 @@ Usage: $CMD [-h] [-o output_dir] [-m msg_body_file] [-s subject] [-r relative_to
-l local branch Local branch name (default: HEAD)
-c Create an RFC (Request for Comment) patch series
-h Display this help message
+ -a Automatically push local branch (-l) to remote branch (-b),
+ or set CPR_CONTRIB_AUTO_PUSH in env
-i commit_id Ending commit (default: HEAD)
-m msg_body_file The file containing a blurb to be inserted into the summary email
-o output_dir Specify the output directory for the messages (default: pull-PID)
-p prefix Use [prefix N/M] instead of [PATCH N/M] as the subject prefix
-r relative_to Starting commit (default: master)
-s subject The subject to be inserted into the summary email
- -u remote The git remote where the branch is located
+ -u remote The git remote where the branch is located, or set CPR_CONTRIB_REMOTE in env
-d relative_dir Generate patches relative to directory
Examples:
@@ -58,8 +60,9 @@ Usage: $CMD [-h] [-o output_dir] [-m msg_body_file] [-s subject] [-r relative_to
EOM
}
+REMOTE="$CPR_CONTRIB_REMOTE"
# Parse and validate arguments
-while getopts "b:cd:hi:m:o:p:r:s:u:l:" OPT; do
+while getopts "b:acd:hi:m:o:p:r:s:u:l:" OPT; do
case $OPT in
b)
BRANCH="$OPTARG"
@@ -101,35 +104,45 @@ while getopts "b:cd:hi:m:o:p:r:s:u:l:" OPT; do
;;
u)
REMOTE="$OPTARG"
- REMOTE_URL=$(git config remote.$REMOTE.url)
- if [ $? -ne 0 ]; then
- echo "ERROR: git config failed to find a url for '$REMOTE'"
- echo
- echo "To add a remote url for $REMOTE, use:"
- echo " git config remote.$REMOTE.url <url>"
- exit 1
- fi
-
- # Rewrite private URLs to public URLs
- # Determine the repository name for use in the WEB_URL later
- case "$REMOTE_URL" in
- *@*)
- USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
- PROTO_RE="[a-z][a-z+]*://"
- GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
- REMOTE_URL=${REMOTE_URL%.git}
- REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
- REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
- ;;
- *)
- echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
- echo " The pull and browse URLs will likely be incorrect"
- ;;
- esac
+ ;;
+ a)
+ CPR_CONTRIB_AUTO_PUSH="1"
;;
esac
done
+if [ -z "$REMOTE" ]; then
+ echo "ERROR: Missing parameter -u or CPR_CONTRIB_REMOTE in env, no git remote!"
+ usage
+ exit 1
+fi
+
+REMOTE_URL=$(git config remote.$REMOTE.url)
+if [ $? -ne 0 ]; then
+ echo "ERROR: git config failed to find a url for '$REMOTE'"
+ echo
+ echo "To add a remote url for $REMOTE, use:"
+ echo " git config remote.$REMOTE.url <url>"
+ exit 1
+fi
+
+# Rewrite private URLs to public URLs
+# Determine the repository name for use in the WEB_URL later
+case "$REMOTE_URL" in
+*@*)
+ USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
+ PROTO_RE="[a-z][a-z+]*://"
+ GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
+ REMOTE_URL=${REMOTE_URL%.git}
+ REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
+ REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
+ ;;
+*)
+ echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
+ echo " The pull and browse URLs will likely be incorrect"
+ ;;
+esac
+
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
echo "NOTE: Assuming remote branch '$BRANCH', use -b to override."
@@ -140,12 +153,6 @@ if [ -z "$L_BRANCH" ]; then
echo "NOTE: Assuming local branch HEAD, use -l to override."
fi
-if [ -z "$REMOTE_URL" ]; then
- echo "ERROR: Missing parameter -u, no git remote!"
- usage
- exit 1
-fi
-
if [ $RFC -eq 1 ]; then
PREFIX="RFC $PREFIX"
fi
@@ -171,6 +178,11 @@ esac
# Perform a sanity test on the web URL. Issue a warning if it is not
# accessible, but do not abort as users may want to run offline.
if [ -n "$WEB_URL" ]; then
+ if [ "$CPR_CONTRIB_AUTO_PUSH" = "1" ]; then
+ echo "Pushing '$BRANCH' on '$REMOTE' as requested..."
+ git push $REMOTE $L_BRANCH:$BRANCH
+ echo ""
+ fi
wget --no-check-certificate -q $WEB_URL -O /dev/null
if [ $? -ne 0 ]; then
echo "WARNING: Branch '$BRANCH' was not found on the contrib git tree."
@@ -217,6 +229,7 @@ else
fi
if [ $? -ne 0 ]; then
echo "ERROR: git request-pull reported an error"
+ rm -rf $ODIR
exit 1
fi
@@ -249,7 +262,13 @@ if [ -n "$BODY" ]; then
sed -i "/BLURB HERE/ d" "$CL"
fi
-# If the user specified a subject, replace the SUBJECT token with it.
+# Set subject automatically if there is only one patch
+patch_cnt=`git log --pretty=oneline ${RELATIVE_TO}..${L_BRANCH} | wc -l`
+if [ -z "$SUBJECT" -a $patch_cnt -eq 1 ]; then
+ SUBJECT="`git log --format=%s ${RELATIVE_TO}..${L_BRANCH}`"
+fi
+
+# Replace the SUBJECT token with it.
if [ -n "$SUBJECT" ]; then
sed -i -e "s/\*\*\* SUBJECT HERE \*\*\*/$SUBJECT/" "$CL"
fi
diff --git a/import-layers/yocto-poky/scripts/devtool b/import-layers/yocto-poky/scripts/devtool
index 478039065..0c32c502a 100755
--- a/import-layers/yocto-poky/scripts/devtool
+++ b/import-layers/yocto-poky/scripts/devtool
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# OpenEmbedded Development tool
#
@@ -22,7 +22,7 @@ import os
import argparse
import glob
import re
-import ConfigParser
+import configparser
import subprocess
import logging
@@ -51,12 +51,12 @@ class ConfigHandler(object):
def __init__(self, filename):
self.config_file = filename
- self.config_obj = ConfigParser.SafeConfigParser()
+ self.config_obj = configparser.SafeConfigParser()
def get(self, section, option, default=None):
try:
ret = self.config_obj.get(section, option)
- except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
+ except (configparser.NoOptionError, configparser.NoSectionError):
if default != None:
ret = default
else:
@@ -86,6 +86,11 @@ class ConfigHandler(object):
with open(self.config_file, 'w') as f:
self.config_obj.write(f)
+ def set(self, section, option, value):
+ if not self.config_obj.has_section(section):
+ self.config_obj.add_section(section)
+ self.config_obj.set(section, option, value)
+
class Context:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@@ -174,13 +179,16 @@ def _create_workspace(workspacedir, config, basepath):
# Add a README file
with open(os.path.join(workspacedir, 'README'), 'w') as f:
f.write('This layer was created by the OpenEmbedded devtool utility in order to\n')
- f.write('contain recipes and bbappends. In most instances you should use the\n')
+ f.write('contain recipes and bbappends that are currently being worked on. The idea\n')
+    f.write('is that the contents are temporary - once you have finished working on a\n')
+ f.write('recipe you use the appropriate method to move the files you have been\n')
+ f.write('working on to a proper layer. In most instances you should use the\n')
f.write('devtool utility to manage files within it rather than modifying files\n')
f.write('directly (although recipes added with "devtool add" will often need\n')
f.write('direct modification.)\n')
- f.write('\nIf you no longer need to use devtool you can remove the path to this\n')
- f.write('workspace layer from your conf/bblayers.conf file (and then delete the\n')
- f.write('layer, if you wish).\n')
+ f.write('\nIf you no longer need to use devtool or the workspace layer\'s contents\n')
+ f.write('you can remove the path to this workspace layer from your conf/bblayers.conf\n')
+ f.write('file (and then delete the layer, if you wish).\n')
f.write('\nNote that by default, if devtool fetches and unpacks source code, it\n')
f.write('will place it in a subdirectory of a "sources" subdirectory of the\n')
f.write('layer. If you prefer it to be elsewhere you can specify the source\n')
@@ -281,18 +289,17 @@ def main():
if global_args.bbpath is None:
tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
- global_args.bbpath = tinfoil.config_data.getVar('BBPATH', True)
- else:
- tinfoil = None
+ try:
+ global_args.bbpath = tinfoil.config_data.getVar('BBPATH', True)
+ finally:
+ tinfoil.shutdown()
for path in [scripts_path] + global_args.bbpath.split(':'):
pluginpath = os.path.join(path, 'lib', 'devtool')
scriptutils.load_plugins(logger, plugins, pluginpath)
- if tinfoil:
- tinfoil.shutdown()
-
subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
subparsers.add_subparser_group('sdk', 'SDK maintenance', -2)
subparsers.add_subparser_group('advanced', 'Advanced', -1)
@@ -325,7 +332,7 @@ def main():
except DevtoolError as err:
if str(err):
logger.error(str(err))
- ret = 1
+ ret = err.exitcode
except argparse_oe.ArgumentUsageError as ae:
parser.error_subcommand(ae.message, ae.subcommand)
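"subparsers.required = True" (also added to devtool-stress earlier) papers over a Python 3 behaviour change: argparse stopped treating a missing subcommand as an error, so "devtool" with no arguments would silently parse. A minimal sketch of the pattern:

    import argparse

    parser = argparse.ArgumentParser(prog='devtool')
    subparsers = parser.add_subparsers(dest='subparser_name', metavar='<subcommand>')
    subparsers.required = True      # py3: without this, bare "devtool" parses fine
    subparsers.add_parser('modify')

    args = parser.parse_args(['modify'])
    print(args.subparser_name)      # -> modify

Note that required=True needs dest to be set so argparse can name the missing argument in its error message.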
diff --git a/import-layers/yocto-poky/scripts/gen-lockedsig-cache b/import-layers/yocto-poky/scripts/gen-lockedsig-cache
index 0986a2165..49de74ed9 100755
--- a/import-layers/yocto-poky/scripts/gen-lockedsig-cache
+++ b/import-layers/yocto-poky/scripts/gen-lockedsig-cache
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import sys
@@ -15,15 +15,27 @@ def mkdir(d):
if len(sys.argv) < 5:
print("Incorrect number of arguments specified")
- print("syntax: gen-lockedsig-cache <locked-sigs.inc> <input-cachedir> <output-cachedir> <nativelsbstring>")
+ print("syntax: gen-lockedsig-cache <locked-sigs.inc> <input-cachedir> <output-cachedir> <nativelsbstring> [filterfile]")
sys.exit(1)
+filterlist = []
+if len(sys.argv) > 5:
+ print('Reading filter file %s' % sys.argv[5])
+ with open(sys.argv[5]) as f:
+ for l in f.readlines():
+ if ":" in l:
+ filterlist.append(l.rstrip())
+
print('Reading %s' % sys.argv[1])
sigs = []
with open(sys.argv[1]) as f:
for l in f.readlines():
if ":" in l:
- sigs.append(l.split(":")[2].split()[0])
+ task, sig = l.split()[0].rsplit(':', 1)
+            if filterlist and task not in filterlist:
+ print('Filtering out %s' % task)
+ else:
+ sigs.append(sig)
print('Gathering file list')
files = set()
@@ -45,13 +57,14 @@ for f in files:
destdir = os.path.dirname(dst)
mkdir(destdir)
+ src = os.path.realpath(f)
if os.path.exists(dst):
os.remove(dst)
- if (os.stat(f).st_dev == os.stat(destdir).st_dev):
+ if (os.stat(src).st_dev == os.stat(destdir).st_dev):
print('linking')
- os.link(f, dst)
+ os.link(src, dst)
else:
print('copying')
- shutil.copyfile(f, dst)
+ shutil.copyfile(src, dst)
print('Done!')
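The gen-lockedsig-cache fix above has two parts: os.path.realpath() resolves a symlinked sstate file so the hard link targets the real data, and comparing st_dev decides between a cheap hard link (same filesystem) and a copy. The same decision in isolation - a sketch, not the script's exact code:

    import os, shutil

    def place(f, dst):
        src = os.path.realpath(f)      # resolve symlinks before linking
        destdir = os.path.dirname(dst)
        os.makedirs(destdir, exist_ok=True)
        if os.path.exists(dst):
            os.remove(dst)
        if os.stat(src).st_dev == os.stat(destdir).st_dev:
            os.link(src, dst)          # same filesystem: hard link, no data copied
        else:
            shutil.copyfile(src, dst)  # different device: fall back to copying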
diff --git a/import-layers/yocto-poky/scripts/lib/argparse_oe.py b/import-layers/yocto-poky/scripts/lib/argparse_oe.py
index bf3ebaddf..bf6eb1719 100644
--- a/import-layers/yocto-poky/scripts/lib/argparse_oe.py
+++ b/import-layers/yocto-poky/scripts/lib/argparse_oe.py
@@ -14,23 +14,35 @@ class ArgumentParser(argparse.ArgumentParser):
kwargs.setdefault('formatter_class', OeHelpFormatter)
self._subparser_groups = OrderedDict()
super(ArgumentParser, self).__init__(*args, **kwargs)
+ self._positionals.title = 'arguments'
+ self._optionals.title = 'options'
def error(self, message):
- sys.stderr.write('ERROR: %s\n' % message)
- self.print_help()
+ """error(message: string)
+
+ Prints a help message incorporating the message to stderr and
+ exits.
+ """
+ self._print_message('%s: error: %s\n' % (self.prog, message), sys.stderr)
+ self.print_help(sys.stderr)
sys.exit(2)
def error_subcommand(self, message, subcommand):
if subcommand:
- for action in self._actions:
- if isinstance(action, argparse._SubParsersAction):
- for choice, subparser in action.choices.items():
- if choice == subcommand:
- subparser.error(message)
- return
+ action = self._get_subparser_action()
+ try:
+ subparser = action._name_parser_map[subcommand]
+ except KeyError:
+ self.error('no subparser for name "%s"' % subcommand)
+ else:
+ subparser.error(message)
+
self.error(message)
def add_subparsers(self, *args, **kwargs):
+ if 'dest' not in kwargs:
+ kwargs['dest'] = '_subparser_name'
+
ret = super(ArgumentParser, self).add_subparsers(*args, **kwargs)
# Need a way of accessing the parent parser
ret._parent_parser = self
@@ -43,6 +55,38 @@ class ArgumentParser(argparse.ArgumentParser):
def add_subparser_group(self, groupname, groupdesc, order=0):
self._subparser_groups[groupname] = (groupdesc, order)
+ def parse_args(self, args=None, namespace=None):
+ """Parse arguments, using the correct subparser to show the error."""
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ message = 'unrecognized arguments: %s' % ' '.join(argv)
+ if self._subparsers:
+ subparser = self._get_subparser(args)
+ subparser.error(message)
+ else:
+ self.error(message)
+ sys.exit(2)
+ return args
+
+ def _get_subparser(self, args):
+ action = self._get_subparser_action()
+ if action.dest == argparse.SUPPRESS:
+ self.error('cannot get subparser, the subparser action dest is suppressed')
+
+ name = getattr(args, action.dest)
+ try:
+ return action._name_parser_map[name]
+ except KeyError:
+ self.error('no subparser for name "%s"' % name)
+
+ def _get_subparser_action(self):
+ if not self._subparsers:
+ self.error('cannot return the subparser action, no subparsers added')
+
+ for action in self._subparsers._group_actions:
+ if isinstance(action, argparse._SubParsersAction):
+ return action
+
class ArgumentSubParser(ArgumentParser):
def __init__(self, *args, **kwargs):
@@ -51,10 +95,6 @@ class ArgumentSubParser(ArgumentParser):
if 'order' in kwargs:
self._order = kwargs.pop('order')
super(ArgumentSubParser, self).__init__(*args, **kwargs)
- for agroup in self._action_groups:
- if agroup.title == 'optional arguments':
- agroup.title = 'options'
- break
def parse_known_args(self, args=None, namespace=None):
# This works around argparse not handling optional positional arguments being
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/engine.py b/import-layers/yocto-poky/scripts/lib/bsp/engine.py
index 66e2162ea..07a15bb90 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/engine.py
+++ b/import-layers/yocto-poky/scripts/lib/bsp/engine.py
@@ -34,23 +34,22 @@
import os
import sys
from abc import ABCMeta, abstractmethod
-from tags import *
+from .tags import *
import shlex
import json
import subprocess
import shutil
-class Line():
+class Line(metaclass=ABCMeta):
"""
Generic (abstract) container representing a line that will appear
in the BSP-generating program.
"""
- __metaclass__ = ABCMeta
def __init__(self, line):
self.line = line
self.generated_line = ""
- self.prio = sys.maxint
+ self.prio = sys.maxsize
self.discard = False
@abstractmethod
@@ -155,7 +154,7 @@ class InputLine(Line):
try:
self.prio = int(props["prio"])
except KeyError:
- self.prio = sys.maxint
+ self.prio = sys.maxsize
def gen(self, context = None):
try:
@@ -201,7 +200,7 @@ class EditBoxInputLine(InputLine):
msg += " [default: " + default_choice + "]"
- line = name + " = default(raw_input(\"" + msg + " \"), " + name + ")"
+ line = name + " = default(input(\"" + msg + " \"), " + name + ")"
return line
@@ -314,16 +313,15 @@ class BooleanInputLine(InputLine):
msg += " [default: " + default_choice + "]"
- line = name + " = boolean(raw_input(\"" + msg + " \"), " + name + ")"
+ line = name + " = boolean(input(\"" + msg + " \"), " + name + ")"
return line
-class ListInputLine(InputLine):
+class ListInputLine(InputLine, metaclass=ABCMeta):
"""
Base class for List-based Input lines. e.g. Choicelist, Checklist.
"""
- __metaclass__ = ABCMeta
def __init__(self, props, tag, lineno):
InputLine.__init__(self, props, tag, lineno)
@@ -464,9 +462,9 @@ class ListInputLine(InputLine):
choices_str = self.gen_choices_str(choicepairs)
choices_val_list = self.gen_choices_val_list(choicepairs)
if checklist:
- choiceval = default(find_choicevals(raw_input(msg + "\n" + choices_str), choices_val_list), default_choice)
+ choiceval = default(find_choicevals(input(msg + "\n" + choices_str), choices_val_list), default_choice)
else:
- choiceval = default(find_choiceval(raw_input(msg + "\n" + choices_str), choices_val_list), default_choice)
+ choiceval = default(find_choiceval(input(msg + "\n" + choices_str), choices_val_list), default_choice)
return choiceval
@@ -540,12 +538,12 @@ def get_verified_git_repo(input_str, name):
"""
msg = input_str.strip() + " "
- giturl = default(raw_input(msg), name)
+ giturl = default(input(msg), name)
while True:
if verify_git_repo(giturl):
return giturl
- giturl = default(raw_input(msg), name)
+ giturl = default(input(msg), name)
def get_verified_file(input_str, name, filename_can_be_null):
@@ -555,14 +553,14 @@ def get_verified_file(input_str, name, filename_can_be_null):
"""
msg = input_str.strip() + " "
- filename = default(raw_input(msg), name)
+ filename = default(input(msg), name)
while True:
if not filename and filename_can_be_null:
return filename
if os.path.isfile(filename):
return filename
- filename = default(raw_input(msg), name)
+ filename = default(input(msg), name)
def replace_file(replace_this, with_this):
@@ -1263,13 +1261,13 @@ def conditional_filename(filename):
return None
end = filename.find(CLOSE_TAG, opentag_start)
if end == -1:
- print "No close tag found for open tag in filename %s" % filename
+ print("No close tag found for open tag in filename %s" % filename)
sys.exit(1)
# we have a {{ tag i.e. code
tag = filename[opentag_start + len(OPEN_TAG):end].strip()
if not tag.lstrip().startswith(IF_TAG):
- print "Only 'if' tags are allowed in file or directory names, filename: %s" % filename
+ print("Only 'if' tags are allowed in file or directory names, filename: %s" % filename)
sys.exit(1)
return CodeLine(tag)
@@ -1286,7 +1284,7 @@ class InputLineGroup(InputLine):
def __init__(self, codeline):
InputLine.__init__(self, {}, "", 0)
self.group = []
- self.prio = sys.maxint
+ self.prio = sys.maxsize
self.group.append(codeline)
def append(self, line):
@@ -1364,7 +1362,7 @@ def run_program_lines(linelist, codedump):
of = open("bspgen.out", "w")
of.write(buf)
of.close()
- exec buf
+ exec(buf)
def gen_target(files, context = None):
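exec is a statement in Python 2 but an ordinary builtin function in Python 3. The function form also accepts an explicit namespace, which can keep a generated program like the bspgen.out buffer from leaking names into the caller; whether to isolate it that way is a design choice, sketched here:

    buf = 'greeting = "hello"\nprint(greeting)'

    ns = {}
    exec(buf, ns)                  # py2 spelling: exec buf
    assert ns["greeting"] == "hello"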
@@ -1387,7 +1385,7 @@ def gen_supplied_property_vals(properties, program_lines):
Generate user-specified entries for input values instead of
generating input prompts.
"""
- for name, val in properties.iteritems():
+ for name, val in properties.items():
program_line = name + " = \"" + val + "\""
program_lines.append(program_line)
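dict.iteritems() was dropped in Python 3; items() now returns a lightweight view, so the rename costs nothing on iteration. The property expansion above, isolated with made-up values:

    properties = {"kernel_choice": "linux-yocto_4.8", "smp": "y"}
    program_lines = []
    for name, val in properties.items():    # py2 spelling: iteritems()
        program_lines.append(name + ' = "' + val + '"')
    assert 'smp = "y"' in program_lines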
@@ -1515,7 +1513,7 @@ def expand_targets(context, bsp_output_dir, expand_common=True):
arches = os.listdir(arch_path)
if arch not in arches or arch == "common":
- print "Invalid karch, exiting\n"
+ print("Invalid karch, exiting\n")
sys.exit(1)
target = os.path.join(arch_path, arch)
@@ -1541,7 +1539,7 @@ def yocto_common_create(machine, target, scripts_path, layer_output_dir, codedum
expand_common - boolean, use the contents of (for bsp layers) arch/common
"""
if os.path.exists(layer_output_dir):
- print "\nlayer output dir already exists, exiting. (%s)" % layer_output_dir
+ print("\nlayer output dir already exists, exiting. (%s)" % layer_output_dir)
sys.exit(1)
properties = None
@@ -1549,11 +1547,13 @@ def yocto_common_create(machine, target, scripts_path, layer_output_dir, codedum
if properties_file:
try:
infile = open(properties_file, "r")
+ properties = json.load(infile)
except IOError:
- print "Couldn't open properties file %s for reading, exiting" % properties_file
+ print("Couldn't open properties file %s for reading, exiting" % properties_file)
+ sys.exit(1)
+ except ValueError:
+ print("Wrong format on properties file %s, exiting" % properties_file)
sys.exit(1)
-
- properties = json.load(infile)
if properties_str and not properties:
properties = json.loads(properties_str)
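The restructured hunk moves json.load() inside the try block so a malformed file is reported cleanly instead of raising an uncaught traceback; json signals parse failures with ValueError (json.JSONDecodeError is a subclass of it). The same shape as a self-contained helper, with an illustrative name:

    import json
    import sys

    def load_properties(properties_file):
        try:
            with open(properties_file, "r") as infile:
                return json.load(infile)
        except IOError:
            print("Couldn't open properties file %s for reading, exiting" % properties_file)
            sys.exit(1)
        except ValueError:
            print("Wrong format on properties file %s, exiting" % properties_file)
            sys.exit(1)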
@@ -1597,8 +1597,8 @@ def yocto_layer_create(layer_name, scripts_path, layer_output_dir, codedump, pro
"""
yocto_common_create(layer_name, "layer", scripts_path, layer_output_dir, codedump, properties_file, properties, False)
- print "\nNew layer created in %s.\n" % (layer_output_dir)
- print "Don't forget to add it to your BBLAYERS (for details see %s/README)." % (layer_output_dir)
+ print("\nNew layer created in %s.\n" % layer_output_dir)
+ print("Don't forget to add it to your BBLAYERS (for details see %s/README)." % layer_output_dir)
def yocto_bsp_create(machine, arch, scripts_path, bsp_output_dir, codedump, properties_file, properties=None):
@@ -1616,21 +1616,21 @@ def yocto_bsp_create(machine, arch, scripts_path, bsp_output_dir, codedump, prop
"""
yocto_common_create(machine, arch, scripts_path, bsp_output_dir, codedump, properties_file, properties)
- print "\nNew %s BSP created in %s" % (arch, bsp_output_dir)
+ print("\nNew %s BSP created in %s" % (arch, bsp_output_dir))
def print_dict(items, indent = 0):
"""
Print the values in a possibly nested dictionary.
"""
- for key, val in items.iteritems():
- print " "*indent + "\"%s\" :" % key,
+ for key, val in items.items():
+ print(" "*indent + "\"%s\" :" % key)
if type(val) == dict:
- print "{"
+ print("{")
print_dict(val, indent + 1)
- print " "*indent + "}"
+ print(" "*indent + "}")
else:
- print "%s" % val
+ print("%s" % val)
def get_properties(input_lines):
@@ -1681,7 +1681,7 @@ def yocto_layer_list_properties(arch, scripts_path, properties_file, expand_comm
try:
of = open(properties_file, "w")
except IOError:
- print "Couldn't open properties file %s for writing, exiting" % properties_file
+ print("Couldn't open properties file %s for writing, exiting" % properties_file)
sys.exit(1)
json.dump(properties, of, indent=1)
@@ -1755,10 +1755,10 @@ def print_values(type, values_list):
"""
if type == "choicelist":
for value in values_list:
- print "[\"%s\", \"%s\"]" % (value[0], value[1])
+ print("[\"%s\", \"%s\"]" % (value[0], value[1]))
elif type == "boolean":
for value in values_list:
- print "[\"%s\", \"%s\"]" % (value[0], value[1])
+ print("[\"%s\", \"%s\"]" % (value[0], value[1]))
def yocto_layer_list_property_values(arch, property, scripts_path, properties_file, expand_common=True):
@@ -1789,7 +1789,7 @@ def yocto_layer_list_property_values(arch, property, scripts_path, properties_fi
input_line = find_input_line(property, input_lines)
if not input_line:
- print "Couldn't find values for property %s" % property
+ print("Couldn't find values for property %s" % property)
return
values_list = []
@@ -1818,7 +1818,7 @@ def yocto_layer_list_property_values(arch, property, scripts_path, properties_fi
try:
of = open(properties_file, "w")
except IOError:
- print "Couldn't open properties file %s for writing, exiting" % properties_file
+ print("Couldn't open properties file %s for writing, exiting" % properties_file)
sys.exit(1)
json.dump(values_list, of)
@@ -1826,44 +1826,28 @@ def yocto_layer_list_property_values(arch, property, scripts_path, properties_fi
print_values(type, values_list)
-def yocto_bsp_list(args, scripts_path, properties_file):
+def yocto_bsp_list(args, scripts_path):
"""
Print available architectures, or the complete list of properties
defined by the BSP, or the possible values for a particular BSP
property.
"""
- if len(args) < 1:
- return False
-
- if args[0] == "karch":
+ if args.karch == "karch":
lib_path = scripts_path + '/lib'
bsp_path = lib_path + '/bsp'
arch_path = bsp_path + '/substrate/target/arch'
- print "Architectures available:"
+ print("Architectures available:")
for arch in os.listdir(arch_path):
if arch == "common" or arch == "layer":
continue
- print " %s" % arch
- return True
- else:
- arch = args[0]
-
- if len(args) < 2 or len(args) > 3:
- return False
-
- if len(args) == 2:
- if args[1] == "properties":
- yocto_layer_list_properties(arch, scripts_path, properties_file)
- else:
- return False
+ print(" %s" % arch)
+ return
- if len(args) == 3:
- if args[1] == "property":
- yocto_layer_list_property_values(arch, args[2], scripts_path, properties_file)
- else:
- return False
+ if args.properties:
+ yocto_layer_list_properties(args.karch, scripts_path, args.properties_file)
+ elif args.property:
+ yocto_layer_list_property_values(args.karch, args.property, scripts_path, args.properties_file)
- return True
def yocto_layer_list(args, scripts_path, properties_file):
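yocto_bsp_list() now receives a parsed argparse namespace rather than a raw argument list, so the manual length and shape checks disappear. A hypothetical parser that would produce the attributes consumed above; the real registration lives in the yocto-bsp front end and may differ:

    import argparse

    parser = argparse.ArgumentParser(prog="yocto-bsp")
    subparsers = parser.add_subparsers(dest="subcommand")
    listp = subparsers.add_parser("list")
    listp.add_argument("karch", help='kernel architecture, or literally "karch" to enumerate them')
    group = listp.add_mutually_exclusive_group()
    group.add_argument("--properties", action="store_true")
    group.add_argument("--property")
    listp.add_argument("-o", "--outfile", dest="properties_file")

    args = parser.parse_args(["list", "arm", "--properties"])
    assert args.karch == "arm" and args.properties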
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/help.py b/import-layers/yocto-poky/scripts/lib/bsp/help.py
index 85a09dd29..4f0d7721f 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/help.py
+++ b/import-layers/yocto-poky/scripts/lib/bsp/help.py
@@ -42,7 +42,7 @@ def display_help(subcommand, subcommands):
help = subcommands.get(subcommand, subcommand_error)[2]
pager = subprocess.Popen('less', stdin=subprocess.PIPE)
- pager.communicate(help)
+ pager.communicate(bytes(help, 'UTF-8'))
return True
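Popen.communicate() expects bytes in Python 3 unless the pipe was opened in text mode; bytes(help, 'UTF-8'), or equivalently help.encode('utf-8'), performs the conversion. A minimal sketch, with cat standing in for the interactive less pager:

    import subprocess

    text = "some help text\n"
    pager = subprocess.Popen("cat", stdin=subprocess.PIPE)
    pager.communicate(text.encode("utf-8"))   # bytes, not str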
@@ -183,9 +183,9 @@ DESCRIPTION
yocto_bsp_list_usage = """
usage: yocto-bsp list karch
- yocto-bsp list <karch> properties
+ yocto-bsp list <karch> --properties
[-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
- yocto-bsp list <karch> property <xxx>
+ yocto-bsp list <karch> --property <xxx>
[-o <JSON PROPERTY FILE> | --outfile <JSON PROPERTY_FILE>]
This command enumerates the complete set of possible values for a
@@ -213,9 +213,9 @@ NAME
SYNOPSIS
yocto-bsp list karch
- yocto-bsp list <karch> properties
+ yocto-bsp list <karch> --properties
[--o <JSON PROPERTY FILE> | -outfile <JSON PROPERTY_FILE>]
- yocto-bsp list <karch> property <xxx>
+ yocto-bsp list <karch> --property <xxx>
[--o <JSON PROPERTY FILE> | -outfile <JSON PROPERTY_FILE>]
DESCRIPTION
@@ -246,9 +246,9 @@ DESCRIPTION
object will consist of the set of name:value pairs corresponding
to the (possibly nested) dictionary of properties defined by the
input statements used by the BSP. Some example output for the
- 'list properties' command:
+ 'list --properties' command:
- $ yocto-bsp list arm properties
+ $ yocto-bsp list arm --properties
"touchscreen" : {
"msg" : Does your BSP have a touchscreen? (y/N)
"default" : n
@@ -310,11 +310,11 @@ DESCRIPTION
name:value pairs corresponding to the array of property values
associated with the property.
- $ yocto-bsp list i386 property xserver_choice
+ $ yocto-bsp list i386 --property xserver_choice
["xserver_vesa", "VESA xserver support"]
["xserver_i915", "i915 xserver support"]
- $ yocto-bsp list arm property base_kbranch_linux_yocto_3_0
+ $ yocto-bsp list arm --property base_kbranch_linux_yocto_3_0
Getting branches from remote repo git://git.yoctoproject.org/linux-yocto-3.0...
["yocto/base", "yocto/base"]
["yocto/eg20t", "yocto/eg20t"]
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/kernel.py b/import-layers/yocto-poky/scripts/lib/bsp/kernel.py
index ba68b60fc..a3ee325a8 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/kernel.py
+++ b/import-layers/yocto-poky/scripts/lib/bsp/kernel.py
@@ -29,11 +29,10 @@
import sys
import os
import shutil
-from tags import *
+from .tags import *
import glob
import subprocess
-from engine import create_context
-
+from .engine import create_context
def find_bblayers():
"""
@@ -42,7 +41,7 @@ def find_bblayers():
try:
builddir = os.environ["BUILDDIR"]
except KeyError:
- print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
+ print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
sys.exit(1)
bblayers_conf = os.path.join(builddir, "conf/bblayers.conf")
@@ -50,10 +49,10 @@ def find_bblayers():
bitbake_env_cmd = "bitbake -e"
bitbake_env_lines = subprocess.Popen(bitbake_env_cmd, shell=True,
- stdout=subprocess.PIPE).stdout.read()
+ stdout=subprocess.PIPE).stdout.read().decode('utf-8')
if not bitbake_env_lines:
- print "Couldn't get '%s' output, exiting." % bitbake_env_cmd
+ print("Couldn't get '%s' output, exiting." % bitbake_env_cmd)
sys.exit(1)
for line in bitbake_env_lines.split('\n'):
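This is the mirror image of the encode fix: stdout.read() hands back bytes under Python 3, so the output must be decoded before string operations like split('\n'). The pattern isolated below, with echo standing in for the real bitbake -e call and a made-up layer path:

    import subprocess

    out = subprocess.Popen('echo BBLAYERS="/srv/poky/meta"', shell=True,
                           stdout=subprocess.PIPE).stdout.read().decode("utf-8")
    for line in out.split("\n"):
        if line.startswith("BBLAYERS"):
            print(line)
            break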
@@ -62,8 +61,7 @@ def find_bblayers():
break
if not bblayers:
- print "Couldn't find BBLAYERS in %s output, exiting." % \
- bitbake_env_cmd
+ print("Couldn't find BBLAYERS in %s output, exiting." % bitbake_env_cmd)
sys.exit(1)
raw_layers = bblayers.split()
@@ -110,8 +108,8 @@ def find_bsp_layer(machine):
if layer.endswith(machine):
return layer
- print "Unable to find the BSP layer for machine %s." % machine
- print "Please make sure it is listed in bblayers.conf"
+ print("Unable to find the BSP layer for machine %s." % machine)
+ print("Please make sure it is listed in bblayers.conf")
sys.exit(1)
@@ -190,8 +188,8 @@ def yocto_kernel_config_list(scripts_path, machine):
"""
config_items = read_config_items(scripts_path, machine)
- print "The current set of machine-specific kernel config items for %s is:" % machine
- print gen_choices_str(config_items)
+ print("The current set of machine-specific kernel config items for %s is:" % machine)
+ print(gen_choices_str(config_items))
def yocto_kernel_config_rm(scripts_path, machine):
@@ -202,9 +200,9 @@ def yocto_kernel_config_rm(scripts_path, machine):
"""
config_items = read_config_items(scripts_path, machine)
- print "Specify the kernel config items to remove:"
- input = raw_input(gen_choices_str(config_items))
- rm_choices = input.split()
+ print("Specify the kernel config items to remove:")
+ inp = input(gen_choices_str(config_items))
+ rm_choices = inp.split()
rm_choices.sort()
removed = []
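Beyond the print conversions, this hunk renames the local variable input to inp: now that the builtin input() is actually being called, binding its result to a name called input would shadow the builtin and break any later prompt in the same scope. The corrected pattern, with gen_choices_str as a stand-in for the script's own helper:

    def gen_choices_str(items):
        # hypothetical stand-in for the helper used above
        return "".join("\t%d) %s\n" % (i + 1, it) for i, it in enumerate(items))

    patches = ["0001-fix-dts.patch", "0002-enable-uart.patch"]
    inp = input(gen_choices_str(patches))   # 'inp', so builtin input() survives
    rm_choices = inp.split()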
@@ -213,18 +211,18 @@ def yocto_kernel_config_rm(scripts_path, machine):
try:
idx = int(choice) - 1
except ValueError:
- print "Invalid choice (%s), exiting" % choice
+ print("Invalid choice (%s), exiting" % choice)
sys.exit(1)
if idx < 0 or idx >= len(config_items):
- print "Invalid choice (%d), exiting" % (idx + 1)
+ print("Invalid choice (%d), exiting" % (idx + 1))
sys.exit(1)
removed.append(config_items.pop(idx))
write_config_items(scripts_path, machine, config_items)
- print "Removed items:"
+ print("Removed items:")
for r in removed:
- print "\t%s" % r
+ print("\t%s" % r)
def yocto_kernel_config_add(scripts_path, machine, config_items):
@@ -239,7 +237,7 @@ def yocto_kernel_config_add(scripts_path, machine, config_items):
for item in config_items:
if not item.startswith("CONFIG") or (not "=y" in item and not "=m" in item):
- print "Invalid config item (%s), exiting" % item
+ print("Invalid config item (%s), exiting" % item)
sys.exit(1)
if item not in cur_items and item not in new_items:
new_items.append(item)
@@ -249,16 +247,16 @@ def yocto_kernel_config_add(scripts_path, machine, config_items):
if len(new_items) > 0:
cur_items.extend(new_items)
write_config_items(scripts_path, machine, cur_items)
- print "Added item%s:" % ("" if len(new_items)==1 else "s")
+ print("Added item%s:" % ("" if len(new_items)==1 else "s"))
for n in new_items:
- print "\t%s" % n
+ print("\t%s" % n)
if len(dup_items) > 0:
output="The following item%s already exist%s in the current configuration, ignoring %s:" % \
(("","s", "it") if len(dup_items)==1 else ("s", "", "them" ))
- print output
+ print(output)
for n in dup_items:
- print "\t%s" % n
+ print("\t%s" % n)
def find_current_kernel(bsp_layer, machine):
"""
@@ -347,8 +345,8 @@ def yocto_kernel_patch_list(scripts_path, machine):
"""
patches = read_patch_items(scripts_path, machine)
- print "The current set of machine-specific patches for %s is:" % machine
- print gen_choices_str(patches)
+ print("The current set of machine-specific patches for %s is:" % machine)
+ print(gen_choices_str(patches))
def yocto_kernel_patch_rm(scripts_path, machine):
@@ -358,26 +356,26 @@ def yocto_kernel_patch_rm(scripts_path, machine):
"""
patches = read_patch_items(scripts_path, machine)
- print "Specify the patches to remove:"
- input = raw_input(gen_choices_str(patches))
- rm_choices = input.split()
+ print("Specify the patches to remove:")
+ inp = input(gen_choices_str(patches))
+ rm_choices = inp.split()
rm_choices.sort()
removed = []
filesdir = find_filesdir(scripts_path, machine)
if not filesdir:
- print "Couldn't rm patch(es) since we couldn't find a 'files' dir"
+ print("Couldn't rm patch(es) since we couldn't find a 'files' dir")
sys.exit(1)
for choice in reversed(rm_choices):
try:
idx = int(choice) - 1
except ValueError:
- print "Invalid choice (%s), exiting" % choice
+ print("Invalid choice (%s), exiting" % choice)
sys.exit(1)
if idx < 0 or idx >= len(patches):
- print "Invalid choice (%d), exiting" % (idx + 1)
+ print("Invalid choice (%d), exiting" % (idx + 1))
sys.exit(1)
filesdir_patch = os.path.join(filesdir, patches[idx])
if os.path.isfile(filesdir_patch):
@@ -387,9 +385,9 @@ def yocto_kernel_patch_rm(scripts_path, machine):
write_patch_items(scripts_path, machine, patches)
- print "Removed patches:"
+ print("Removed patches:")
for r in removed:
- print "\t%s" % r
+ print("\t%s" % r)
def yocto_kernel_patch_add(scripts_path, machine, patches):
@@ -401,19 +399,19 @@ def yocto_kernel_patch_add(scripts_path, machine, patches):
for patch in patches:
if os.path.basename(patch) in existing_patches:
- print "Couldn't add patch (%s) since it's already been added" % os.path.basename(patch)
+ print("Couldn't add patch (%s) since it's already been added" % os.path.basename(patch))
sys.exit(1)
filesdir = find_filesdir(scripts_path, machine)
if not filesdir:
- print "Couldn't add patch (%s) since we couldn't find a 'files' dir to add it to" % os.path.basename(patch)
+ print("Couldn't add patch (%s) since we couldn't find a 'files' dir to add it to" % os.path.basename(patch))
sys.exit(1)
new_patches = []
for patch in patches:
if not os.path.isfile(patch):
- print "Couldn't find patch (%s), exiting" % patch
+ print("Couldn't find patch (%s), exiting" % patch)
sys.exit(1)
basename = os.path.basename(patch)
filesdir_patch = os.path.join(filesdir, basename)
@@ -424,9 +422,9 @@ def yocto_kernel_patch_add(scripts_path, machine, patches):
cur_items.extend(new_patches)
write_patch_items(scripts_path, machine, cur_items)
- print "Added patches:"
+ print("Added patches:")
for n in new_patches:
- print "\t%s" % n
+ print("\t%s" % n)
def inc_pr(line):
@@ -461,7 +459,7 @@ def kernel_contents_changed(scripts_path, machine):
kernel = find_current_kernel(layer, machine)
if not kernel:
- print "Couldn't determine the kernel for this BSP, exiting."
+ print("Couldn't determine the kernel for this BSP, exiting.")
sys.exit(1)
kernel_bbfile = os.path.join(layer, "recipes-kernel/linux/" + kernel + ".bbappend")
@@ -597,8 +595,8 @@ def yocto_kernel_feature_list(scripts_path, machine):
"""
features = read_features(scripts_path, machine)
- print "The current set of machine-specific features for %s is:" % machine
- print gen_choices_str(features)
+ print("The current set of machine-specific features for %s is:" % machine)
+ print(gen_choices_str(features))
def yocto_kernel_feature_rm(scripts_path, machine):
@@ -609,9 +607,9 @@ def yocto_kernel_feature_rm(scripts_path, machine):
"""
features = read_features(scripts_path, machine)
- print "Specify the features to remove:"
- input = raw_input(gen_choices_str(features))
- rm_choices = input.split()
+ print("Specify the features to remove:")
+ inp = input(gen_choices_str(features))
+ rm_choices = inp.split()
rm_choices.sort()
removed = []
@@ -620,18 +618,18 @@ def yocto_kernel_feature_rm(scripts_path, machine):
try:
idx = int(choice) - 1
except ValueError:
- print "Invalid choice (%s), exiting" % choice
+ print("Invalid choice (%s), exiting" % choice)
sys.exit(1)
if idx < 0 or idx >= len(features):
- print "Invalid choice (%d), exiting" % (idx + 1)
+ print("Invalid choice (%d), exiting" % (idx + 1))
sys.exit(1)
removed.append(features.pop(idx))
write_features(scripts_path, machine, features)
- print "Removed features:"
+ print("Removed features:")
for r in removed:
- print "\t%s" % r
+ print("\t%s" % r)
def yocto_kernel_feature_add(scripts_path, machine, features):
@@ -643,7 +641,7 @@ def yocto_kernel_feature_add(scripts_path, machine, features):
for item in features:
if not item.endswith(".scc"):
- print "Invalid feature (%s), exiting" % item
+ print("Invalid feature (%s), exiting" % item)
sys.exit(1)
new_items.append(item)
@@ -652,9 +650,9 @@ def yocto_kernel_feature_add(scripts_path, machine, features):
write_features(scripts_path, machine, cur_items)
- print "Added features:"
+ print("Added features:")
for n in new_items:
- print "\t%s" % n
+ print("\t%s" % n)
def find_feature_url(git_url):
@@ -714,7 +712,7 @@ def print_feature_descs(layer, feature_dir):
feature_dir + "/" + file)
f = open(fullpath)
feature_desc = find_feature_desc(f.readlines())
- print feature_dir + "/" + file + ": " + feature_desc
+ print(feature_dir + "/" + file + ": " + feature_desc)
def yocto_kernel_available_features_list(scripts_path, machine):
@@ -725,7 +723,7 @@ def yocto_kernel_available_features_list(scripts_path, machine):
layer = find_bsp_layer(machine)
kernel = find_current_kernel(layer, machine)
if not kernel:
- print "Couldn't determine the kernel for this BSP, exiting."
+ print("Couldn't determine the kernel for this BSP, exiting.")
sys.exit(1)
context = create_context(machine, "arch", scripts_path)
@@ -735,9 +733,9 @@ def yocto_kernel_available_features_list(scripts_path, machine):
feature_url = find_feature_url(giturl)
feature_cmd = "wget -q -O - " + feature_url
- tmp = subprocess.Popen(feature_cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+ tmp = subprocess.Popen(feature_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
- print "The current set of kernel features available to %s is:\n" % machine
+ print("The current set of kernel features available to %s is:\n" % machine)
if tmp:
tmpline = tmp.split("\n")
@@ -754,9 +752,9 @@ def yocto_kernel_available_features_list(scripts_path, machine):
feature_type = feature_def[0].strip()
feature = feature_def[1].strip()
desc = get_feature_desc(giturl, feature)
- print "%s: %s" % (feature, desc)
+ print("%s: %s" % (feature, desc))
- print "[local]"
+ print("[local]")
print_feature_descs(layer, "cfg")
print_feature_descs(layer, "features")
@@ -786,7 +784,7 @@ def get_feature_desc(git_url, feature):
"""
feature_desc_url = find_feature_desc_url(git_url, feature)
feature_desc_cmd = "wget -q -O - " + feature_desc_url
- tmp = subprocess.Popen(feature_desc_cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+ tmp = subprocess.Popen(feature_desc_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
return find_feature_desc(tmp.split("\n"))
@@ -800,7 +798,7 @@ def yocto_kernel_feature_describe(scripts_path, machine, feature):
kernel = find_current_kernel(layer, machine)
if not kernel:
- print "Couldn't determine the kernel for this BSP, exiting."
+ print("Couldn't determine the kernel for this BSP, exiting.")
sys.exit(1)
context = create_context(machine, "arch", scripts_path)
@@ -810,7 +808,7 @@ def yocto_kernel_feature_describe(scripts_path, machine, feature):
desc = get_feature_desc(giturl, feature)
- print desc
+ print(desc)
def check_feature_name(feature_name):
@@ -818,11 +816,11 @@ def check_feature_name(feature_name):
Sanity-check the feature name for create/destroy. Return False if not OK.
"""
if not feature_name.endswith(".scc"):
- print "Invalid feature name (must end with .scc) [%s], exiting" % feature_name
+ print("Invalid feature name (must end with .scc) [%s], exiting" % feature_name)
return False
if "/" in feature_name:
- print "Invalid feature name (don't specify directory) [%s], exiting" % feature_name
+ print("Invalid feature name (don't specify directory) [%s], exiting" % feature_name)
return False
return True
@@ -836,11 +834,11 @@ def check_create_input(feature_items):
return False
if feature_items[1].endswith(".patch") or feature_items[1].startswith("CONFIG_"):
- print "Missing description and/or compatibilty [%s], exiting" % feature_items[1]
+ print("Missing description and/or compatibilty [%s], exiting" % feature_items[1])
return False
if feature_items[2].endswith(".patch") or feature_items[2].startswith("CONFIG_"):
- print "Missing description and/or compatibility [%s], exiting" % feature_items[1]
+ print("Missing description and/or compatibility [%s], exiting" % feature_items[1])
return False
return True
@@ -868,7 +866,7 @@ def yocto_kernel_feature_create(scripts_path, machine, feature_items):
if ("=y" in item or "=m" in item):
cfg_items.append(item)
else:
- print "Invalid feature item (must be .patch or CONFIG_*) [%s], exiting" % item
+ print("Invalid feature item (must be .patch or CONFIG_*) [%s], exiting" % item)
sys.exit(1)
feature_dirname = "cfg"
@@ -877,7 +875,7 @@ def yocto_kernel_feature_create(scripts_path, machine, feature_items):
filesdir = find_filesdir(scripts_path, machine)
if not filesdir:
- print "Couldn't add feature (%s), no 'files' dir found" % feature
+ print("Couldn't add feature (%s), no 'files' dir found" % feature)
sys.exit(1)
featdir = os.path.join(filesdir, feature_dirname)
@@ -886,7 +884,7 @@ def yocto_kernel_feature_create(scripts_path, machine, feature_items):
for patch in patches:
if not os.path.isfile(patch):
- print "Couldn't find patch (%s), exiting" % patch
+ print("Couldn't find patch (%s), exiting" % patch)
sys.exit(1)
basename = os.path.basename(patch)
featdir_patch = os.path.join(featdir, basename)
@@ -910,8 +908,8 @@ def yocto_kernel_feature_create(scripts_path, machine, feature_items):
new_feature_file.write("kconf non-hardware " + feature_basename + ".cfg\n")
new_feature_file.close()
- print "Added feature:"
- print "\t%s" % feature_dirname + "/" + feature
+ print("Added feature:")
+ print("\t%s" % feature_dirname + "/" + feature)
def feature_in_use(scripts_path, machine, feature):
@@ -949,18 +947,18 @@ def yocto_kernel_feature_destroy(scripts_path, machine, feature):
if feature_in_use(scripts_path, machine, "features/" + feature) or \
feature_in_use(scripts_path, machine, "cfg/" + feature):
- print "Feature %s is in use (use 'feature rm' to un-use it first), exiting" % feature
+ print("Feature %s is in use (use 'feature rm' to un-use it first), exiting" % feature)
sys.exit(1)
filesdir = find_filesdir(scripts_path, machine)
if not filesdir:
- print "Couldn't destroy feature (%s), no 'files' dir found" % feature
+ print("Couldn't destroy feature (%s), no 'files' dir found" % feature)
sys.exit(1)
feature_dirname = "features"
featdir = os.path.join(filesdir, feature_dirname)
if not os.path.exists(featdir):
- print "Couldn't find feature directory (%s)" % feature_dirname
+ print("Couldn't find feature directory (%s)" % feature_dirname)
sys.exit(1)
feature_fqn = os.path.join(featdir, feature)
@@ -968,11 +966,11 @@ def yocto_kernel_feature_destroy(scripts_path, machine, feature):
feature_dirname = "cfg"
featdir = os.path.join(filesdir, feature_dirname)
if not os.path.exists(featdir):
- print "Couldn't find feature directory (%s)" % feature_dirname
+ print("Couldn't find feature directory (%s)" % feature_dirname)
sys.exit(1)
feature_fqn = os.path.join(featdir, feature_filename)
if not os.path.exists(feature_fqn):
- print "Couldn't find feature (%s)" % feature
+ print("Couldn't find feature (%s)" % feature)
sys.exit(1)
f = open(feature_fqn, "r")
@@ -989,8 +987,8 @@ def yocto_kernel_feature_destroy(scripts_path, machine, feature):
feature_remove(scripts_path, machine, feature)
- print "Removed feature:"
- print "\t%s" % feature_dirname + "/" + feature
+ print("Removed feature:")
+ print("\t%s" % feature_dirname + "/" + feature)
def base_branches(context):
@@ -999,10 +997,10 @@ def base_branches(context):
"""
giturl = find_giturl(context)
- print "Getting branches from remote repo %s..." % giturl
+ print("Getting branches from remote repo %s..." % giturl)
gitcmd = "git ls-remote %s *heads* 2>&1" % (giturl)
- tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+ tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
branches = []
@@ -1029,10 +1027,10 @@ def all_branches(context):
"""
giturl = find_giturl(context)
- print "Getting branches from remote repo %s..." % giturl
+ print("Getting branches from remote repo %s..." % giturl)
gitcmd = "git ls-remote %s *heads* 2>&1" % (giturl)
- tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read()
+ tmp = subprocess.Popen(gitcmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
branches = []
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc
index 405972d98..8a881574d 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/files/machine-standard.scc
@@ -3,7 +3,7 @@ define KMACHINE {{=machine}}
define KARCH arm
-include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} nopatch
{{ if need_new_kbranch == "y": }}
define KTYPE {{=new_kbranch}}
branch {{=machine}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
index 00cf36042..0120ed055 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
{{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
new file mode 100644
index 000000000..5fb45d9ab
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..7c0df8bcd
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf
index d5abe4f61..4745c1cc5 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/conf/machine/machine.conf
@@ -33,15 +33,7 @@ MACHINE_FEATURES += "wifi efi pcbios"
{{ input type:"boolean" name:"xserver" prio:"50" msg:"Do you need support for X? (y/n)" default:"y" }}
-{{ if xserver == "y" and (kernel_choice == "linux-yocto_4.4" or kernel_choice == "linux-yocto_4.1"): }}
-{{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_vesa" }}
-{{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }}
-{{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }}
-{{ input type:"choice" val:"xserver_i965" msg:"i965 xserver support" }}
-{{ input type:"choice" val:"xserver_fbdev" msg:"fbdev xserver support" }}
-{{ input type:"choice" val:"xserver_modesetting" msg:"modesetting xserver support" }}
-
-{{ if xserver == "y" and kernel_choice == "custom": }}
+{{ if xserver == "y": }}
{{ input type:"choicelist" name:"xserver_choice" prio:"50" msg:"Please select an xserver for this machine:" default:"xserver_vesa" }}
{{ input type:"choice" val:"xserver_vesa" msg:"VESA xserver support" }}
{{ input type:"choice" val:"xserver_i915" msg:"i915 xserver support" }}
@@ -49,7 +41,7 @@ MACHINE_FEATURES += "wifi efi pcbios"
{{ input type:"choice" val:"xserver_fbdev" msg:"fbdev xserver support" }}
{{ input type:"choice" val:"xserver_modesetting" msg:"modesetting xserver support" }}
-{{ if xserver == "y" and kernel_choice != "linux-yocto_4.4" and kernel_choice != "linux-yocto_4.1" and kernel_choice != "custom": xserver_choice = "xserver_i915" }}
+{{ if xserver == "y" and kernel_choice != "linux-yocto_4.8" and kernel_choice != "linux-yocto_4.4" and kernel_choice != "linux-yocto_4.1" and kernel_choice != "custom": xserver_choice = "xserver_i915" }}
{{ if xserver == "y": }}
XSERVER ?= "${XSERVER_X86_BASE} \
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc
index 67a54be87..38d1ca558 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine-standard.scc
@@ -3,7 +3,7 @@ define KMACHINE {{=machine}}
define KARCH i386
-include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} nopatch
{{ if need_new_kbranch == "y": }}
define KTYPE {{=new_kbranch}}
branch {{=machine}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
index 00cf36042..0120ed055 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
{{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
new file mode 100644
index 000000000..5fb45d9ab
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..137d8fa51
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc
index 7c9dc52d2..b34f3d352 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/files/machine-standard.scc
@@ -3,7 +3,7 @@ define KMACHINE {{=machine}}
define KARCH mips
-include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} nopatch
{{ if need_new_kbranch == "y": }}
define KTYPE {{=new_kbranch}}
branch {{=machine}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
index 00cf36042..0120ed055 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
{{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
new file mode 100644
index 000000000..5fb45d9ab
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..7c0df8bcd
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc
index 7c9dc52d2..b34f3d352 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/files/machine-standard.scc
@@ -3,7 +3,7 @@ define KMACHINE {{=machine}}
define KARCH mips
-include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} nopatch
{{ if need_new_kbranch == "y": }}
define KTYPE {{=new_kbranch}}
branch {{=machine}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
index 00cf36042..0120ed055 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
{{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
new file mode 100644
index 000000000..5fb45d9ab
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..accf9d5d8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc
index 89b344fea..c166fcd3d 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/files/machine-standard.scc
@@ -3,7 +3,7 @@ define KMACHINE {{=machine}}
define KARCH powerpc
-include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} nopatch
{{ if need_new_kbranch == "y": }}
define KTYPE {{=new_kbranch}}
branch {{=machine}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
index 00cf36042..0120ed055 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
{{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
new file mode 100644
index 000000000..5fb45d9ab
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..7c0df8bcd
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc
index 14554da28..43cf642d4 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine-standard.scc
@@ -4,15 +4,15 @@ define KMACHINE {{=machine}}
define KARCH {{=qemuarch}}
{{ if qemuarch == "i386" or qemuarch == "x86_64": }}
-include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} nopatch
{{ if qemuarch == "arm": }}
-include bsp/arm-versatile-926ejs/arm-versatile-926ejs-standard
+include bsp/arm-versatile-926ejs/arm-versatile-926ejs-standard nopatch
{{ if qemuarch == "powerpc": }}
-include bsp/qemu-ppc32/qemu-ppc32-standard
+include bsp/qemu-ppc32/qemu-ppc32-standard nopatch
{{ if qemuarch == "mips": }}
-include bsp/mti-malta32/mti-malta32-be-standard
+include bsp/mti-malta32/mti-malta32-be-standard nopatch
{{ if qemuarch == "mips64": }}
-include bsp/mti-malta64/mti-malta64-be-standard
+include bsp/mti-malta64/mti-malta64-be-standard nopatch
{{ if need_new_kbranch == "y": }}
define KTYPE {{=new_kbranch}}
branch {{=machine}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg
index d560784b5..3fa4ed0b7 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/files/machine.cfg
@@ -1 +1,3 @@
-# yocto-bsp-filename {{=machine}}.cfg
\ No newline at end of file
+# yocto-bsp-filename {{=machine}}.cfg
+{{ if qemuarch == "i386": }}
+# CONFIG_64BIT is not set
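The added line follows the kconfig convention that an explicitly disabled option is recorded as the comment "# CONFIG_FOO is not set" rather than CONFIG_FOO=n, so fragment-merging tools must treat such comments as directives. A small illustrative parser (the names here are hypothetical):

    import re

    UNSET_RE = re.compile(r'^# (CONFIG_\w+) is not set$')
    SET_RE = re.compile(r'^(CONFIG_\w+)=(.*)$')

    def parse_fragment(text):
        """Map CONFIG_* names to values; 'is not set' comments become 'n'."""
        options = {}
        for line in text.splitlines():
            line = line.strip()
            m = UNSET_RE.match(line)
            if m:
                options[m.group(1)] = 'n'
            else:
                m = SET_RE.match(line)
                if m:
                    options[m.group(1)] = m.group(2)
        return options

    print(parse_fragment('# CONFIG_64BIT is not set\nCONFIG_SMP=y'))
    # {'CONFIG_64BIT': 'n', 'CONFIG_SMP': 'y'}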
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
index 00cf36042..0120ed055 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
{{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
new file mode 100644
index 000000000..c1635d6f6
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -0,0 +1,62 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/common-pc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..e8c3fc82e
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,61 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta64" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc
index 9c9cc9025..a2b291085 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/files/machine-standard.scc
@@ -3,7 +3,7 @@ define KMACHINE {{=machine}}
define KARCH x86_64
-include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}}
+include {{=map_standard_kbranch(need_new_kbranch, new_kbranch, existing_kbranch)}} nopatch
{{ if need_new_kbranch == "y": }}
define KTYPE {{=new_kbranch}}
branch {{=machine}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
index 00cf36042..0120ed055 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
{{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.4) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
{{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.4"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
new file mode 100644
index 000000000..5fb45d9ab
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -0,0 +1,33 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-patches.scc \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..7c0df8bcd
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,32 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.8": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}} = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+ file://{{=machine}}-user-config.cfg \
+ file://{{=machine}}-user-features.scc \
+ "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.8"
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/__init__.py b/import-layers/yocto-poky/scripts/lib/devtool/__init__.py
index ff97dfc94..e675133f6 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/__init__.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/__init__.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Development tool - utility functions for plugins
#
@@ -26,10 +26,11 @@ import re
logger = logging.getLogger('devtool')
-
class DevtoolError(Exception):
"""Exception for handling devtool errors"""
- pass
+ def __init__(self, message, exitcode=1):
+ super(DevtoolError, self).__init__(message)
+ self.exitcode = exitcode
def exec_build_env_command(init_path, builddir, cmd, watch=False, **options):
@@ -59,7 +60,7 @@ def exec_build_env_command(init_path, builddir, cmd, watch=False, **options):
def exec_watch(cmd, **options):
"""Run program with stdout shown on sys.stdout"""
import bb
- if isinstance(cmd, basestring) and not "shell" in options:
+ if isinstance(cmd, str) and not "shell" in options:
options["shell"] = True
process = subprocess.Popen(
@@ -69,6 +70,7 @@ def exec_watch(cmd, **options):
buf = ''
while True:
out = process.stdout.read(1)
+ out = out.decode('utf-8')
if out:
sys.stdout.write(out)
sys.stdout.flush()
@@ -144,8 +146,7 @@ def parse_recipe(config, tinfoil, pn, appends, filter_workspace=True):
not path.startswith(config.workspace_path)]
else:
append_files = None
- return oe.recipeutils.parse_recipe(recipefile, append_files,
- tinfoil.config_data)
+ return oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, append_files)
def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
"""
@@ -155,7 +156,7 @@ def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
workspacepn = pn
- for recipe, value in workspace.iteritems():
+ for recipe, value in workspace.items():
if recipe == pn:
break
if bbclassextend:
@@ -195,15 +196,18 @@ def use_external_build(same_dir, no_same_dir, d):
b_is_s = False
return b_is_s
-def setup_git_repo(repodir, version, devbranch, basetag='devtool-base'):
+def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
"""
Set up the git repository for the source tree
"""
import bb.process
+ import oe.patch
if not os.path.exists(os.path.join(repodir, '.git')):
bb.process.run('git init', cwd=repodir)
bb.process.run('git add .', cwd=repodir)
- commit_cmd = ['git', 'commit', '-q']
+ commit_cmd = ['git']
+ oe.patch.GitApplyTree.gitCommandUserOptions(commit_cmd, d=d)
+ commit_cmd += ['commit', '-q']
stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
if not stdout:
commit_cmd.append('--allow-empty')
@@ -255,3 +259,32 @@ def get_bbclassextend_targets(recipefile, pn):
elif variant in ['native', 'cross', 'crosssdk']:
targets.append('%s-%s' % (pn, variant))
return targets
+
+def ensure_npm(config, basepath, fixed_setup=False):
+ """
+ Ensure that npm is available and either build it or show a
+ reasonable error message
+ """
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ nativepath = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE', True)
+ finally:
+ tinfoil.shutdown()
+
+ npmpath = os.path.join(nativepath, 'npm')
+ if not os.path.exists(npmpath):
+ logger.info('Building nodejs-native')
+ try:
+ exec_build_env_command(config.init_path, basepath,
+ 'bitbake -q nodejs-native', watch=True)
+ except bb.process.ExecutionError as e:
+ if "Nothing PROVIDES 'nodejs-native'" in e.stdout:
+ if fixed_setup:
+ msg = 'nodejs-native is required for npm but is not available within this SDK'
+ else:
+ msg = 'nodejs-native is required for npm but is not available - you will likely need to add a layer that provides nodejs'
+ raise DevtoolError(msg)
+ else:
+ raise
+ if not os.path.exists(npmpath):
+ raise DevtoolError('Built nodejs-native but npm binary still could not be found at %s' % npmpath)
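Two patterns in this hunk are worth spelling out. DevtoolError now carries an exit status (passed as 4 further down for incompatible recipes), which lets a single top-level handler translate failures into process exit codes; and exec_watch decodes the byte stream that Python 3 pipes return (note that decoding one byte at a time assumes effectively single-byte output; multi-byte UTF-8 sequences would need buffering). A sketch of how such an exception is typically consumed, assuming a hypothetical main() (the real dispatch lives in the devtool front-end script):

    import sys

    class DevtoolError(Exception):
        """Exception carrying a process exit status (as in the patch)."""
        def __init__(self, message, exitcode=1):
            super(DevtoolError, self).__init__(message)
            self.exitcode = exitcode

    def main():
        # Hypothetical command body; real devtool dispatches to plugins.
        raise DevtoolError('recipe is not supported by this tool', exitcode=4)

    if __name__ == '__main__':
        try:
            sys.exit(main())
        except DevtoolError as err:
            print('ERROR: %s' % err, file=sys.stderr)
            sys.exit(err.exitcode)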
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/build.py b/import-layers/yocto-poky/scripts/lib/devtool/build.py
index 48f6fe1be..6be549dd5 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/build.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/build.py
@@ -27,7 +27,7 @@ logger = logging.getLogger('devtool')
def _set_file_values(fn, values):
- remaining = values.keys()
+ remaining = list(values.keys())
def varfunc(varname, origvalue, op, newlines):
newvalue = values.get(varname, origvalue)
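The list() wrapper matters because Python 3's dict.keys() is a live view: mutating the dict while iterating the view raises RuntimeError, whereas iterating a snapshot is safe. A short illustration:

    values = {'SRCREV': 'abc123', 'PV': '1.0'}

    remaining = list(values.keys())   # snapshot, as in the patch
    for name in remaining:
        values.pop(name)              # safe: we iterate the copy, not the view

    print(values)   # {}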
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/build_image.py b/import-layers/yocto-poky/scripts/lib/devtool/build_image.py
index e51d76647..ae75511dc 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/build_image.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/build_image.py
@@ -18,6 +18,7 @@
"""Devtool plugin containing the build-image subcommand."""
import os
+import errno
import logging
from bb.process import ExecutionError
@@ -34,7 +35,7 @@ def _get_packages(tinfoil, workspace, config):
for recipe in workspace:
data = parse_recipe(config, tinfoil, recipe, True)
if 'class-target' in data.getVar('OVERRIDES', True).split(':'):
- if recipe in data.getVar('PACKAGES', True):
+ if recipe in data.getVar('PACKAGES', True).split():
result.append(recipe)
else:
logger.warning("Skipping recipe %s as it doesn't produce a "
@@ -72,70 +73,89 @@ def build_image(args, config, basepath, workspace):
return result
def build_image_task(config, basepath, workspace, image, add_packages=None, task=None, extra_append=None):
- appendfile = os.path.join(config.workspace_path, 'appends',
- '%s.bbappend' % image)
-
# remove <image>.bbappend to make sure setup_tinfoil doesn't
# break because of it
- if os.path.isfile(appendfile):
- os.unlink(appendfile)
+ target_basename = config.get('SDK', 'target_basename', '')
+ if target_basename:
+ appendfile = os.path.join(config.workspace_path, 'appends',
+ '%s.bbappend' % target_basename)
+ try:
+ os.unlink(appendfile)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
tinfoil = setup_tinfoil(basepath=basepath)
- rd = parse_recipe(config, tinfoil, image, True)
- if not rd:
- # Error already shown
- return (1, None)
- if not bb.data.inherits_class('image', rd):
- raise TargetNotImageError()
-
- outputdir = None
try:
- if workspace or add_packages:
- if add_packages:
- packages = add_packages
- else:
- packages = _get_packages(tinfoil, workspace, config)
- else:
- packages = None
- if not task:
- if not packages and not add_packages and workspace:
- logger.warning('No recipes in workspace, building image %s unmodified', image)
- elif not packages:
- logger.warning('No packages to add, building image %s unmodified', image)
-
- if packages or extra_append:
- bb.utils.mkdirhier(os.path.dirname(appendfile))
- with open(appendfile, 'w') as afile:
- if packages:
- # include packages from workspace recipes into the image
- afile.write('IMAGE_INSTALL_append = " %s"\n' % ' '.join(packages))
- if not task:
- logger.info('Building image %s with the following '
- 'additional packages: %s', image, ' '.join(packages))
- if extra_append:
- for line in extra_append:
- afile.write('%s\n' % line)
-
- if task in ['populate_sdk', 'populate_sdk_ext']:
- outputdir = rd.getVar('SDK_DEPLOY', True)
- else:
- outputdir = rd.getVar('DEPLOY_DIR_IMAGE', True)
-
- tinfoil.shutdown()
-
- options = ''
- if task:
- options += '-c %s' % task
-
- # run bitbake to build image (or specified task)
+ rd = parse_recipe(config, tinfoil, image, True)
+ if not rd:
+ # Error already shown
+ return (1, None)
+ if not bb.data.inherits_class('image', rd):
+ raise TargetNotImageError()
+
+ # Get the actual filename used and strip the .bb and full path
+ target_basename = rd.getVar('FILE', True)
+ target_basename = os.path.splitext(os.path.basename(target_basename))[0]
+ config.set('SDK', 'target_basename', target_basename)
+ config.write()
+
+ appendfile = os.path.join(config.workspace_path, 'appends',
+ '%s.bbappend' % target_basename)
+
+ outputdir = None
try:
- exec_build_env_command(config.init_path, basepath,
- 'bitbake %s %s' % (options, image), watch=True)
- except ExecutionError as err:
- return (err.exitcode, None)
+ if workspace or add_packages:
+ if add_packages:
+ packages = add_packages
+ else:
+ packages = _get_packages(tinfoil, workspace, config)
+ else:
+ packages = None
+ if not task:
+ if not packages and not add_packages and workspace:
+ logger.warning('No recipes in workspace, building image %s unmodified', image)
+ elif not packages:
+ logger.warning('No packages to add, building image %s unmodified', image)
+
+ if packages or extra_append:
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as afile:
+ if packages:
+ # include packages from workspace recipes into the image
+ afile.write('IMAGE_INSTALL_append = " %s"\n' % ' '.join(packages))
+ if not task:
+ logger.info('Building image %s with the following '
+ 'additional packages: %s', image, ' '.join(packages))
+ if extra_append:
+ for line in extra_append:
+ afile.write('%s\n' % line)
+
+ if task in ['populate_sdk', 'populate_sdk_ext']:
+ outputdir = rd.getVar('SDK_DEPLOY', True)
+ else:
+ outputdir = rd.getVar('DEPLOY_DIR_IMAGE', True)
+
+ tmp_tinfoil = tinfoil
+ tinfoil = None
+ tmp_tinfoil.shutdown()
+
+ options = ''
+ if task:
+ options += '-c %s' % task
+
+ # run bitbake to build image (or specified task)
+ try:
+ exec_build_env_command(config.init_path, basepath,
+ 'bitbake %s %s' % (options, image), watch=True)
+ except ExecutionError as err:
+ return (err.exitcode, None)
+ finally:
+ if os.path.isfile(appendfile):
+ os.unlink(appendfile)
finally:
- if os.path.isfile(appendfile):
- os.unlink(appendfile)
+ if tinfoil:
+ tinfoil.shutdown()
return (0, outputdir)
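The unlink change above replaces a check-then-delete (which races with other processes) with "try to delete, ignore only a missing file". The idiom in stand-alone form, with an illustrative helper name:

    import errno
    import os

    def unlink_ignore_missing(path):
        """Remove path; tolerate it not existing, re-raise anything else."""
        try:
            os.unlink(path)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise

    unlink_ignore_missing('/tmp/does-not-exist.bbappend')  # no error

On Python 3 the same thing can be written with except FileNotFoundError, since that subclass maps to ENOENT.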
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/deploy.py b/import-layers/yocto-poky/scripts/lib/devtool/deploy.py
index 66644ccb6..c4c7bf6c7 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/deploy.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/deploy.py
@@ -85,7 +85,7 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
lines.append('do')
lines.append(' checkpath=`dirname "$checkpath"`')
lines.append('done')
- lines.append('freespace=`df -P $checkpath | sed "1d" | awk \'{ print $4 }\'`')
+ lines.append(r'freespace=$(df -P $checkpath | sed -nre "s/^(\S+\s+){3}([0-9]+).*/\2/p")')
# First line of the file is the total space
lines.append('total=`head -n1 $3`')
lines.append('if [ $total -gt $freespace ] ; then')
@@ -155,83 +155,86 @@ def deploy(args, config, basepath, workspace):
tinfoil = setup_tinfoil(basepath=basepath)
try:
- rd = oe.recipeutils.parse_recipe_simple(tinfoil.cooker, args.recipename, tinfoil.config_data)
- except Exception as e:
- raise DevtoolError('Exception parsing recipe %s: %s' %
- (args.recipename, e))
- recipe_outdir = rd.getVar('D', True)
- if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
- raise DevtoolError('No files to deploy - have you built the %s '
- 'recipe? If so, the install step has not installed '
- 'any files.' % args.recipename)
+ try:
+ rd = oe.recipeutils.parse_recipe_simple(tinfoil.cooker, args.recipename, tinfoil.config_data)
+ except Exception as e:
+ raise DevtoolError('Exception parsing recipe %s: %s' %
+ (args.recipename, e))
+ recipe_outdir = rd.getVar('D', True)
+ if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
+ raise DevtoolError('No files to deploy - have you built the %s '
+ 'recipe? If so, the install step has not installed '
+ 'any files.' % args.recipename)
- filelist = []
- ftotalsize = 0
- for root, _, files in os.walk(recipe_outdir):
- for fn in files:
- # Get the size in kiB (since we'll be comparing it to the output of du -k)
- # MUST use lstat() here not stat() or getfilesize() since we don't want to
- # dereference symlinks
- fsize = int(math.ceil(float(os.lstat(os.path.join(root, fn)).st_size)/1024))
- ftotalsize += fsize
- # The path as it would appear on the target
- fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
- filelist.append((fpath, fsize))
+ filelist = []
+ ftotalsize = 0
+ for root, _, files in os.walk(recipe_outdir):
+ for fn in files:
+ # Get the size in kiB (since we'll be comparing it to the output of du -k)
+ # MUST use lstat() here not stat() or getfilesize() since we don't want to
+ # dereference symlinks
+ fsize = int(math.ceil(float(os.lstat(os.path.join(root, fn)).st_size)/1024))
+ ftotalsize += fsize
+ # The path as it would appear on the target
+ fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
+ filelist.append((fpath, fsize))
- if args.dry_run:
- print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
- for item, _ in filelist:
- print(' %s' % item)
- return 0
+ if args.dry_run:
+ print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
+ for item, _ in filelist:
+ print(' %s' % item)
+ return 0
- extraoptions = ''
- if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
- if not args.show_status:
- extraoptions += ' -q'
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
- # In order to delete previously deployed files and have the manifest file on
- # the target, we write out a shell script and then copy it to the target
- # so we can then run it (piping tar output to it).
- # (We cannot use scp here, because it doesn't preserve symlinks.)
- tmpdir = tempfile.mkdtemp(prefix='devtool')
- try:
- tmpscript = '/tmp/devtool_deploy.sh'
- tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
- shellscript = _prepare_remote_script(deploy=True,
- verbose=args.show_status,
- nopreserve=args.no_preserve,
- nocheckspace=args.no_check_space)
- # Write out the script to a file
- with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
- f.write(shellscript)
- # Write out the file list
- with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
- f.write('%d\n' % ftotalsize)
- for fpath, fsize in filelist:
- f.write('%s %d\n' % (fpath, fsize))
- # Copy them to the target
- ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
- if ret != 0:
- raise DevtoolError('Failed to copy script to %s - rerun with -s to '
- 'get a complete error message' % args.target)
- finally:
- shutil.rmtree(tmpdir)
+ # In order to delete previously deployed files and have the manifest file on
+ # the target, we write out a shell script and then copy it to the target
+ # so we can then run it (piping tar output to it).
+ # (We cannot use scp here, because it doesn't preserve symlinks.)
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_deploy.sh'
+ tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
+ shellscript = _prepare_remote_script(deploy=True,
+ verbose=args.show_status,
+ nopreserve=args.no_preserve,
+ nocheckspace=args.no_check_space)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Write out the file list
+ with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
+ f.write('%d\n' % ftotalsize)
+ for fpath, fsize in filelist:
+ f.write('%s %d\n' % (fpath, fsize))
+ # Copy them to the target
+ ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
+ finally:
+ shutil.rmtree(tmpdir)
- # Now run the script
- ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s \'sh %s %s %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
- if ret != 0:
- raise DevtoolError('Deploy failed - rerun with -s to get a complete '
- 'error message')
+ # Now run the script
+ ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s \'sh %s %s %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ if ret != 0:
+ raise DevtoolError('Deploy failed - rerun with -s to get a complete '
+ 'error message')
- logger.info('Successfully deployed %s' % recipe_outdir)
+ logger.info('Successfully deployed %s' % recipe_outdir)
- files_list = []
- for root, _, files in os.walk(recipe_outdir):
- for filename in files:
- filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
- files_list.append(os.path.join(destdir, filename))
+ files_list = []
+ for root, _, files in os.walk(recipe_outdir):
+ for filename in files:
+ filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
+ files_list.append(os.path.join(destdir, filename))
+ finally:
+ tinfoil.shutdown()
return 0
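The free-space line now extracts the fourth field of `df -P` with a single sed expression instead of a sed|awk pipeline, presumably to shrink the tool dependencies of the generated target-side script. For reference, `df -P` prints a one-line header followed by Filesystem/blocks/Used/Available/Capacity columns; an equivalent parse in Python (illustrative, not part of the patch):

    import subprocess

    def free_blocks(path):
        """Available space at path, in df's block units (1 KiB on
        typical GNU/busybox df), parsed from `df -P` output."""
        out = subprocess.check_output(['df', '-P', path]).decode('utf-8')
        # Skip the header; the fourth column of `df -P` is 'Available'.
        fields = out.splitlines()[1].split()
        return int(fields[3])

    print(free_blocks('/tmp'))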
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py b/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py
index daee7fbbe..ae25cee08 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py
@@ -30,9 +30,11 @@ def runqemu(args, config, basepath, workspace):
"""Entry point for the devtool 'runqemu' subcommand"""
tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
- machine = tinfoil.config_data.getVar('MACHINE', True)
- bindir_native = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE', True)
- tinfoil.shutdown()
+ try:
+ machine = tinfoil.config_data.getVar('MACHINE', True)
+ bindir_native = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE', True)
+ finally:
+ tinfoil.shutdown()
if not glob.glob(os.path.join(bindir_native, 'qemu-system-*')):
raise DevtoolError('QEMU is not available within this SDK')
@@ -46,7 +48,12 @@ def runqemu(args, config, basepath, workspace):
raise DevtoolError('Unable to determine image name to run, please specify one')
try:
- exec_build_env_command(config.init_path, basepath, 'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)), watch=True)
+ # FIXME runqemu assumes that if OECORE_NATIVE_SYSROOT is set then it shouldn't
+ # run bitbake to find out the values of various environment variables, which
+ # isn't the case for the extensible SDK. Work around it for now.
+ newenv = dict(os.environ)
+ newenv.pop('OECORE_NATIVE_SYSROOT', '')
+ exec_build_env_command(config.init_path, basepath, 'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)), watch=True, env=newenv)
except bb.process.ExecutionError as e:
# We've already seen the output since watch=True, so just ensure we return something to the user
return e.exitcode
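The workaround copies the environment and removes one variable for the child process only, leaving os.environ untouched, so runqemu falls back to asking bitbake for its settings. The same pattern in isolation:

    import os
    import subprocess

    # Clone the environment and drop one variable for a single child
    # process; the parent's os.environ is left as-is.
    newenv = dict(os.environ)
    newenv.pop('OECORE_NATIVE_SYSROOT', None)
    subprocess.call(['env'], env=newenv)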
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/sdk.py b/import-layers/yocto-poky/scripts/lib/devtool/sdk.py
index 46fd12bdb..922277b79 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/sdk.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/sdk.py
@@ -107,7 +107,7 @@ def check_manifest(fn, basepath):
return changedfiles
def sdk_update(args, config, basepath, workspace):
- # Fetch locked-sigs.inc file from remote/local destination
+ """Entry point for devtool sdk-update command"""
updateserver = args.updateserver
if not updateserver:
updateserver = config.get('SDK', 'updateserver', '')
@@ -122,10 +122,9 @@ def sdk_update(args, config, basepath, workspace):
else:
logger.debug("Found conf/locked-sigs.inc in %s" % basepath)
- if ':' in updateserver:
- is_remote = True
- else:
- is_remote = False
+ if not '://' in updateserver:
+ logger.error("Update server must be a URL")
+ return -1
layers_dir = os.path.join(basepath, 'layers')
conf_dir = os.path.join(basepath, 'conf')
@@ -139,120 +138,85 @@ def sdk_update(args, config, basepath, workspace):
finally:
tinfoil.shutdown()
- if not is_remote:
- # devtool sdk-update /local/path/to/latest/sdk
- new_locked_sig_file_path = os.path.join(updateserver, 'conf/locked-sigs.inc')
- if not os.path.exists(new_locked_sig_file_path):
- logger.error("%s doesn't exist or is not an extensible SDK" % updateserver)
- return -1
- else:
- logger.debug("Found conf/locked-sigs.inc in %s" % updateserver)
- update_dict = generate_update_dict(new_locked_sig_file_path, old_locked_sig_file_path)
- logger.debug("update_dict = %s" % update_dict)
- newsdk_path = updateserver
- sstate_dir = os.path.join(newsdk_path, 'sstate-cache')
- if not os.path.exists(sstate_dir):
- logger.error("sstate-cache directory not found under %s" % newsdk_path)
- return 1
- sstate_objects = get_sstate_objects(update_dict, sstate_dir)
- logger.debug("sstate_objects = %s" % sstate_objects)
- if len(sstate_objects) == 0:
- logger.info("No need to update.")
+ tmpsdk_dir = tempfile.mkdtemp()
+ try:
+ os.makedirs(os.path.join(tmpsdk_dir, 'conf'))
+ new_locked_sig_file_path = os.path.join(tmpsdk_dir, 'conf', 'locked-sigs.inc')
+ # Fetch manifest from server
+ tmpmanifest = os.path.join(tmpsdk_dir, 'conf', 'sdk-conf-manifest')
+ ret = subprocess.call("wget -q -O %s %s/conf/sdk-conf-manifest" % (tmpmanifest, updateserver), shell=True)
+ changedfiles = check_manifest(tmpmanifest, basepath)
+ if not changedfiles:
+ logger.info("Already up-to-date")
return 0
- logger.info("Installing sstate objects into %s", basepath)
- install_sstate_objects(sstate_objects, updateserver.rstrip('/'), basepath)
- logger.info("Updating configuration files")
- new_conf_dir = os.path.join(updateserver, 'conf')
- shutil.rmtree(conf_dir)
- shutil.copytree(new_conf_dir, conf_dir)
- logger.info("Updating layers")
- new_layers_dir = os.path.join(updateserver, 'layers')
- shutil.rmtree(layers_dir)
- ret = subprocess.call("cp -a %s %s" % (new_layers_dir, layers_dir), shell=True)
- if ret != 0:
- logger.error("Copying %s to %s failed" % (new_layers_dir, layers_dir))
- return ret
- else:
- # devtool sdk-update http://myhost/sdk
- tmpsdk_dir = tempfile.mkdtemp()
- try:
- os.makedirs(os.path.join(tmpsdk_dir, 'conf'))
- new_locked_sig_file_path = os.path.join(tmpsdk_dir, 'conf', 'locked-sigs.inc')
- # Fetch manifest from server
- tmpmanifest = os.path.join(tmpsdk_dir, 'conf', 'sdk-conf-manifest')
- ret = subprocess.call("wget -q -O %s %s/conf/sdk-conf-manifest" % (tmpmanifest, updateserver), shell=True)
- changedfiles = check_manifest(tmpmanifest, basepath)
- if not changedfiles:
- logger.info("Already up-to-date")
- return 0
- # Update metadata
- logger.debug("Updating metadata via git ...")
- #Check for the status before doing a fetch and reset
- if os.path.exists(os.path.join(basepath, 'layers/.git')):
- out = subprocess.check_output("git status --porcelain", shell=True, cwd=layers_dir)
- if not out:
- ret = subprocess.call("git fetch --all; git reset --hard", shell=True, cwd=layers_dir)
- else:
- logger.error("Failed to update metadata as there have been changes made to it. Aborting.");
- logger.error("Changed files:\n%s" % out);
- return -1
+ # Update metadata
+ logger.debug("Updating metadata via git ...")
+ #Check for the status before doing a fetch and reset
+ if os.path.exists(os.path.join(basepath, 'layers/.git')):
+ out = subprocess.check_output("git status --porcelain", shell=True, cwd=layers_dir)
+ if not out:
+ ret = subprocess.call("git fetch --all; git reset --hard", shell=True, cwd=layers_dir)
else:
- ret = -1
+ logger.error("Failed to update metadata as there have been changes made to it. Aborting.");
+ logger.error("Changed files:\n%s" % out);
+ return -1
+ else:
+ ret = -1
+ if ret != 0:
+ ret = subprocess.call("git clone %s/layers/.git" % updateserver, shell=True, cwd=tmpsdk_dir)
+ if ret != 0:
+ logger.error("Updating metadata via git failed")
+ return ret
+ logger.debug("Updating conf files ...")
+ for changedfile in changedfiles:
+ ret = subprocess.call("wget -q -O %s %s/%s" % (changedfile, updateserver, changedfile), shell=True, cwd=tmpsdk_dir)
if ret != 0:
- ret = subprocess.call("git clone %s/layers/.git" % updateserver, shell=True, cwd=tmpsdk_dir)
- if ret != 0:
- logger.error("Updating metadata via git failed")
- return ret
- logger.debug("Updating conf files ...")
- for changedfile in changedfiles:
- ret = subprocess.call("wget -q -O %s %s/%s" % (changedfile, updateserver, changedfile), shell=True, cwd=tmpsdk_dir)
- if ret != 0:
- logger.error("Updating %s failed" % changedfile)
- return ret
+ logger.error("Updating %s failed" % changedfile)
+ return ret
- # Check if UNINATIVE_CHECKSUM changed
- uninative = False
- if 'conf/local.conf' in changedfiles:
- def read_uninative_checksums(fn):
- chksumitems = []
- with open(fn, 'r') as f:
- for line in f:
- if line.startswith('UNINATIVE_CHECKSUM'):
- splitline = re.split(r'[\[\]"\']', line)
- if len(splitline) > 3:
- chksumitems.append((splitline[1], splitline[3]))
- return chksumitems
+ # Check if UNINATIVE_CHECKSUM changed
+ uninative = False
+ if 'conf/local.conf' in changedfiles:
+ def read_uninative_checksums(fn):
+ chksumitems = []
+ with open(fn, 'r') as f:
+ for line in f:
+ if line.startswith('UNINATIVE_CHECKSUM'):
+ splitline = re.split(r'[\[\]"\']', line)
+ if len(splitline) > 3:
+ chksumitems.append((splitline[1], splitline[3]))
+ return chksumitems
- oldsums = read_uninative_checksums(os.path.join(basepath, 'conf/local.conf'))
- newsums = read_uninative_checksums(os.path.join(tmpsdk_dir, 'conf/local.conf'))
- if oldsums != newsums:
- uninative = True
- for buildarch, chksum in newsums:
- uninative_file = os.path.join('downloads', 'uninative', chksum, '%s-nativesdk-libc.tar.bz2' % buildarch)
- mkdir(os.path.join(tmpsdk_dir, os.path.dirname(uninative_file)))
- ret = subprocess.call("wget -q -O %s %s/%s" % (uninative_file, updateserver, uninative_file), shell=True, cwd=tmpsdk_dir)
+ oldsums = read_uninative_checksums(os.path.join(basepath, 'conf/local.conf'))
+ newsums = read_uninative_checksums(os.path.join(tmpsdk_dir, 'conf/local.conf'))
+ if oldsums != newsums:
+ uninative = True
+ for buildarch, chksum in newsums:
+ uninative_file = os.path.join('downloads', 'uninative', chksum, '%s-nativesdk-libc.tar.bz2' % buildarch)
+ mkdir(os.path.join(tmpsdk_dir, os.path.dirname(uninative_file)))
+ ret = subprocess.call("wget -q -O %s %s/%s" % (uninative_file, updateserver, uninative_file), shell=True, cwd=tmpsdk_dir)
- # Ok, all is well at this point - move everything over
- tmplayers_dir = os.path.join(tmpsdk_dir, 'layers')
- if os.path.exists(tmplayers_dir):
- shutil.rmtree(layers_dir)
- shutil.move(tmplayers_dir, layers_dir)
- for changedfile in changedfiles:
- destfile = os.path.join(basepath, changedfile)
- os.remove(destfile)
- shutil.move(os.path.join(tmpsdk_dir, changedfile), destfile)
- os.remove(os.path.join(conf_dir, 'sdk-conf-manifest'))
- shutil.move(tmpmanifest, conf_dir)
- if uninative:
- shutil.rmtree(os.path.join(basepath, 'downloads', 'uninative'))
- shutil.move(os.path.join(tmpsdk_dir, 'downloads', 'uninative'), os.path.join(basepath, 'downloads'))
+ # Ok, all is well at this point - move everything over
+ tmplayers_dir = os.path.join(tmpsdk_dir, 'layers')
+ if os.path.exists(tmplayers_dir):
+ shutil.rmtree(layers_dir)
+ shutil.move(tmplayers_dir, layers_dir)
+ for changedfile in changedfiles:
+ destfile = os.path.join(basepath, changedfile)
+ os.remove(destfile)
+ shutil.move(os.path.join(tmpsdk_dir, changedfile), destfile)
+ os.remove(os.path.join(conf_dir, 'sdk-conf-manifest'))
+ shutil.move(tmpmanifest, conf_dir)
+ if uninative:
+ shutil.rmtree(os.path.join(basepath, 'downloads', 'uninative'))
+ shutil.move(os.path.join(tmpsdk_dir, 'downloads', 'uninative'), os.path.join(basepath, 'downloads'))
- if not sstate_mirrors:
- with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
- f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
- f.write('SSTATE_MIRRORS_append = " file://.* %s/sstate-cache/PATH \\n "\n' % updateserver)
- finally:
- shutil.rmtree(tmpsdk_dir)
+ if not sstate_mirrors:
+ with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
+ f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
+ f.write('SSTATE_MIRRORS_append = " file://.* %s/sstate-cache/PATH \\n "\n' % updateserver)
+ finally:
+ shutil.rmtree(tmpsdk_dir)
if not args.skip_prepare:
# Find all potentially updateable tasks
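The metadata update above only hard-resets the layers checkout when `git status --porcelain` reports it clean, so local edits are never silently discarded. The sequence in isolation (illustrative helper name; as in the patch, `reset --hard` with no ref resets to the current HEAD):

    import subprocess

    def update_layers(layers_dir):
        """Hard-update a git checkout only if it has no local changes."""
        out = subprocess.check_output(['git', 'status', '--porcelain'],
                                      cwd=layers_dir)
        if out:
            raise RuntimeError('local changes present:\n%s'
                               % out.decode('utf-8'))
        subprocess.check_call(['git', 'fetch', '--all'], cwd=layers_dir)
        subprocess.check_call(['git', 'reset', '--hard'], cwd=layers_dir)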
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/standard.py b/import-layers/yocto-poky/scripts/lib/devtool/standard.py
index 77a82d559..4eff6f878 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/standard.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/standard.py
@@ -1,6 +1,6 @@
# Development tool - standard commands plugin
#
-# Copyright (C) 2014-2015 Intel Corporation
+# Copyright (C) 2014-2016 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -28,8 +28,9 @@ import argparse_oe
import scriptutils
import errno
import glob
+import filecmp
from collections import OrderedDict
-from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, use_external_build, setup_git_repo, recipe_to_append, get_bbclassextend_targets, DevtoolError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, use_external_build, setup_git_repo, recipe_to_append, get_bbclassextend_targets, ensure_npm, DevtoolError
from devtool import parse_recipe
logger = logging.getLogger('devtool')
@@ -46,13 +47,13 @@ def add(args, config, basepath, workspace):
# These are positional arguments, but because we're nice, allow
# specifying e.g. source tree without name, or fetch URI without name or
# source tree (if we can detect that that is what the user meant)
- if '://' in args.recipename:
+ if scriptutils.is_src_url(args.recipename):
if not args.fetchuri:
if args.fetch:
raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
args.fetchuri = args.recipename
args.recipename = ''
- elif args.srctree and '://' in args.srctree:
+ elif scriptutils.is_src_url(args.srctree):
if not args.fetchuri:
if args.fetch:
raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
@@ -63,13 +64,17 @@ def add(args, config, basepath, workspace):
args.srctree = args.recipename
args.recipename = None
elif os.path.isdir(args.recipename):
- logger.warn('Ambiguous argument %s - assuming you mean it to be the recipe name')
+ logger.warn('Ambiguous argument "%s" - assuming you mean it to be the recipe name' % args.recipename)
+
+ if args.srctree and os.path.isfile(args.srctree):
+ args.fetchuri = 'file://' + os.path.abspath(args.srctree)
+ args.srctree = ''
if args.fetch:
if args.fetchuri:
raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
else:
- # FIXME should show a warning that -f/--fetch is deprecated here
+ logger.warn('-f/--fetch option is deprecated - you can now simply specify the URL to fetch as a positional argument instead')
args.fetchuri = args.fetch
if args.recipename:
@@ -80,10 +85,6 @@ def add(args, config, basepath, workspace):
if reason:
raise DevtoolError(reason)
- # FIXME this ought to be in validate_pn but we're using that in other contexts
- if '/' in args.recipename:
- raise DevtoolError('"/" is not a valid character in recipe names')
-
if args.srctree:
srctree = os.path.abspath(args.srctree)
srctreeparent = None
@@ -127,6 +128,9 @@ def add(args, config, basepath, workspace):
color = args.color
extracmdopts = ''
if args.fetchuri:
+ if args.fetchuri.startswith('npm://'):
+ ensure_npm(config, basepath, args.fixed_setup)
+
source = args.fetchuri
if srctree:
extracmdopts += ' -x %s' % srctree
@@ -144,16 +148,28 @@ def add(args, config, basepath, workspace):
extracmdopts += ' --also-native'
if args.src_subdir:
extracmdopts += ' --src-subdir "%s"' % args.src_subdir
+ if args.autorev:
+ extracmdopts += ' -a'
tempdir = tempfile.mkdtemp(prefix='devtool')
try:
- try:
- stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create -o %s "%s" %s' % (color, tempdir, source, extracmdopts))
- except bb.process.ExecutionError as e:
- if e.exitcode == 15:
- raise DevtoolError('Could not auto-determine recipe name, please specify it on the command line')
- else:
- raise DevtoolError('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+ while True:
+ try:
+ stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create -o %s "%s" %s' % (color, tempdir, source, extracmdopts))
+ except bb.process.ExecutionError as e:
+ if e.exitcode == 14:
+ # FIXME this is a horrible hack that is unfortunately
+ # necessary due to the fact that we can't run bitbake from
+ # inside recipetool since recipetool keeps tinfoil active
+ # with references to it throughout the code, so we have
+ # to exit out and come back here to do it.
+ ensure_npm(config, basepath, args.fixed_setup)
+ continue
+ elif e.exitcode == 15:
+ raise DevtoolError('Could not auto-determine recipe name, please specify it on the command line')
+ else:
+ raise DevtoolError('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+ break
recipes = glob.glob(os.path.join(tempdir, '*.bb'))
if recipes:
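The retry loop above keys on recipetool's exit status: 14 means "npm needed but unavailable", so devtool builds nodejs-native via ensure_npm() and re-runs the command, while 15 still means the recipe name could not be determined. The shape of that loop in isolation, with a guard added so a hypothetical ensure_npm() that silently fails cannot loop forever:

    import subprocess

    NPM_MISSING = 14  # recipetool's "npm not available" exit status

    def run_with_npm_retry(cmd, ensure_npm):
        """Re-run cmd once after provisioning npm if it exits with 14."""
        retried = False
        while True:
            ret = subprocess.call(cmd)
            if ret == NPM_MISSING and not retried:
                ensure_npm()   # in devtool this builds nodejs-native
                retried = True
                continue
            return ret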
@@ -205,55 +221,57 @@ def add(args, config, basepath, workspace):
for fn in os.listdir(recipedir):
_add_md5(config, recipename, os.path.join(recipedir, fn))
- if args.fetchuri and not args.no_git:
- setup_git_repo(srctree, args.version, 'devtool')
-
- initial_rev = None
- if os.path.exists(os.path.join(srctree, '.git')):
- (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
-
tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
- rd = oe.recipeutils.parse_recipe(recipefile, None, tinfoil.config_data)
- if not rd:
- return 1
-
- if args.src_subdir:
- srctree = os.path.join(srctree, args.src_subdir)
-
- bb.utils.mkdirhier(os.path.dirname(appendfile))
- with open(appendfile, 'w') as f:
- f.write('inherit externalsrc\n')
- f.write('EXTERNALSRC = "%s"\n' % srctree)
-
- b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
- if b_is_s:
- f.write('EXTERNALSRC_BUILD = "%s"\n' % srctree)
- if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
-
- if args.binary:
- f.write('do_install_append() {\n')
- f.write(' rm -rf ${D}/.git\n')
- f.write(' rm -f ${D}/singletask.lock\n')
- f.write('}\n')
-
- if bb.data.inherits_class('npm', rd):
- f.write('do_install_append() {\n')
- f.write(' # Remove files added to source dir by devtool/externalsrc\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/singletask.lock\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/.git\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/oe-local-files\n')
- f.write(' for symlink in ${EXTERNALSRC_SYMLINKS} ; do\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/${symlink%%:*}\n')
- f.write(' done\n')
- f.write('}\n')
+ try:
+ rd = oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, None)
+ if not rd:
+ return 1
- _add_md5(config, recipename, appendfile)
+ if args.fetchuri and not args.no_git:
+ setup_git_repo(srctree, args.version, 'devtool', d=tinfoil.config_data)
- logger.info('Recipe %s has been automatically created; further editing may be required to make it fully functional' % recipefile)
+ initial_rev = None
+ if os.path.exists(os.path.join(srctree, '.git')):
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ initial_rev = stdout.rstrip()
+
+ if args.src_subdir:
+ srctree = os.path.join(srctree, args.src_subdir)
+
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as f:
+ f.write('inherit externalsrc\n')
+ f.write('EXTERNALSRC = "%s"\n' % srctree)
+
+ b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD = "%s"\n' % srctree)
+ if initial_rev:
+ f.write('\n# initial_rev: %s\n' % initial_rev)
+
+ if args.binary:
+ f.write('do_install_append() {\n')
+ f.write(' rm -rf ${D}/.git\n')
+ f.write(' rm -f ${D}/singletask.lock\n')
+ f.write('}\n')
+
+ if bb.data.inherits_class('npm', rd):
+ f.write('do_install_append() {\n')
+ f.write(' # Remove files added to source dir by devtool/externalsrc\n')
+ f.write(' rm -f ${NPM_INSTALLDIR}/singletask.lock\n')
+ f.write(' rm -rf ${NPM_INSTALLDIR}/.git\n')
+ f.write(' rm -rf ${NPM_INSTALLDIR}/oe-local-files\n')
+ f.write(' for symlink in ${EXTERNALSRC_SYMLINKS} ; do\n')
+ f.write(' rm -f ${NPM_INSTALLDIR}/${symlink%%:*}\n')
+ f.write(' done\n')
+ f.write('}\n')
+
+ _add_md5(config, recipename, appendfile)
+
+ logger.info('Recipe %s has been automatically created; further editing may be required to make it fully functional' % recipefile)
- tinfoil.shutdown()
+ finally:
+ tinfoil.shutdown()
return 0
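
A recurring change throughout this patch wraps every tinfoil use in try/finally so the bitbake server is shut down even when the command bails out early. The shape, reduced to its essentials (setup_tinfoil, parse_recipe and tinfoil.shutdown are the devtool helpers seen in the diff itself):

    def command(args, config, basepath, workspace):
        tinfoil = setup_tinfoil(basepath=basepath)   # acquires the bitbake server
        try:
            rd = parse_recipe(config, tinfoil, args.recipename, True)
            if not rd:
                return 1          # early return still releases the server
            ...                   # do the real work with rd
        finally:
            tinfoil.shutdown()    # always runs, on success, error or return
        return 0
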
@@ -262,28 +280,30 @@ def _check_compatible_recipe(pn, d):
"""Check if the recipe is supported by devtool"""
if pn == 'perf':
raise DevtoolError("The perf recipe does not actually check out "
- "source and thus cannot be supported by this tool")
+ "source and thus cannot be supported by this tool",
+ 4)
if pn in ['kernel-devsrc', 'package-index'] or pn.startswith('gcc-source'):
- raise DevtoolError("The %s recipe is not supported by this tool" % pn)
+ raise DevtoolError("The %s recipe is not supported by this tool" % pn, 4)
if bb.data.inherits_class('image', d):
raise DevtoolError("The %s recipe is an image, and therefore is not "
- "supported by this tool" % pn)
+ "supported by this tool" % pn, 4)
if bb.data.inherits_class('populate_sdk', d):
raise DevtoolError("The %s recipe is an SDK, and therefore is not "
- "supported by this tool" % pn)
+ "supported by this tool" % pn, 4)
if bb.data.inherits_class('packagegroup', d):
raise DevtoolError("The %s recipe is a packagegroup, and therefore is "
- "not supported by this tool" % pn)
+ "not supported by this tool" % pn, 4)
if bb.data.inherits_class('meta', d):
raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
- "not supported by this tool" % pn)
+ "not supported by this tool" % pn, 4)
if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC', True):
+ # Not an incompatibility error per se, so we don't pass the error code
raise DevtoolError("externalsrc is currently enabled for the %s "
"recipe. This prevents the normal do_patch task "
"from working. You will need to disable this "
@@ -296,6 +316,13 @@ def _move_file(src, dst):
bb.utils.mkdirhier(dst_d)
shutil.move(src, dst)
+def _copy_file(src, dst):
+ """Copy a file. Creates all the directory components of destination path."""
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.copy(src, dst)
+
def _git_ls_tree(repodir, treeish='HEAD', recursive=False):
"""List contents of a git treeish"""
import bb
@@ -319,7 +346,7 @@ def _git_exclude_path(srctree, path):
# becomes greater than that.
path = os.path.normpath(path)
recurse = True if len(path.split(os.path.sep)) > 1 else False
- git_files = _git_ls_tree(srctree, 'HEAD', recurse).keys()
+ git_files = list(_git_ls_tree(srctree, 'HEAD', recurse).keys())
if path in git_files:
git_files.remove(path)
return git_files
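
The list(...) wrappers added around .keys() here and below are part of the Python 3 migration: dict views are lazy and have no remove(), so any code that mutates the result needs a real list. For instance:

    files = {'a/b.c': '0644', 'd/e.f': '0644'}

    keys = files.keys()        # a live view in Python 3, not a list
    # keys.remove('a/b.c')     # AttributeError: view objects have no remove()

    git_files = list(files.keys())
    git_files.remove('a/b.c')  # fine: an independent, mutable copy
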
@@ -343,19 +370,21 @@ def extract(args, config, basepath, workspace):
if not tinfoil:
# Error already shown
return 1
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
- rd = parse_recipe(config, tinfoil, args.recipename, True)
- if not rd:
- return 1
-
- srctree = os.path.abspath(args.srctree)
- initial_rev = _extract_source(srctree, args.keep_temp, args.branch, False, rd)
- logger.info('Source tree extracted to %s' % srctree)
+ srctree = os.path.abspath(args.srctree)
+ initial_rev = _extract_source(srctree, args.keep_temp, args.branch, False, rd)
+ logger.info('Source tree extracted to %s' % srctree)
- if initial_rev:
- return 0
- else:
- return 1
+ if initial_rev:
+ return 0
+ else:
+ return 1
+ finally:
+ tinfoil.shutdown()
def sync(args, config, basepath, workspace):
"""Entry point for the devtool 'sync' subcommand"""
@@ -365,19 +394,21 @@ def sync(args, config, basepath, workspace):
if not tinfoil:
# Error already shown
return 1
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
- rd = parse_recipe(config, tinfoil, args.recipename, True)
- if not rd:
- return 1
-
- srctree = os.path.abspath(args.srctree)
- initial_rev = _extract_source(srctree, args.keep_temp, args.branch, True, rd)
- logger.info('Source tree %s synchronized' % srctree)
+ srctree = os.path.abspath(args.srctree)
+ initial_rev = _extract_source(srctree, args.keep_temp, args.branch, True, rd)
+ logger.info('Source tree %s synchronized' % srctree)
- if initial_rev:
- return 0
- else:
- return 1
+ if initial_rev:
+ return 0
+ else:
+ return 1
+ finally:
+ tinfoil.shutdown()
class BbTaskExecutor(object):
"""Class for executing bitbake tasks for a recipe
@@ -411,7 +442,10 @@ class BbTaskExecutor(object):
class PatchTaskExecutor(BbTaskExecutor):
def __init__(self, rdata):
+ import oe.patch
self.check_git = False
+ self.useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(self.useroptions, d=rdata)
super(PatchTaskExecutor, self).__init__(rdata)
def exec_func(self, func, report):
@@ -438,7 +472,7 @@ class PatchTaskExecutor(BbTaskExecutor):
stdout, _ = bb.process.run('git status --porcelain', cwd=srcsubdir)
if stdout:
- bb.process.run('git add .; git commit -a -m "Committing changes from %s\n\n%s"' % (func, GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
+ bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(self.useroptions), func, GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
def _prep_extract_operation(config, basepath, recipename, tinfoil=None):
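
PatchTaskExecutor now threads per-user git options (typically -c user.name=... -c user.email=...) into the commit command so commits devtool makes on the user's behalf carry sensible authorship. A sketch of the same composition, with the option list hand-written to stand in for what gitCommandUserOptions would fill in from the datastore:

    import subprocess

    # Stand-in for oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
    useroptions = ['-c', 'user.name="OpenEmbedded"',
                   '-c', 'user.email="oe.patch@oe"']

    def commit_all(srcdir, message):
        """Stage and commit everything with per-invocation user config."""
        subprocess.run('git add .; git %s commit -a -m "%s"'
                       % (' '.join(useroptions), message),
                       shell=True, cwd=srcdir, check=True)
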
@@ -496,7 +530,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, d):
if 'noexec' in (d.getVarFlags('do_unpack', False) or []):
raise DevtoolError("The %s recipe has do_unpack disabled, unable to "
- "extract source" % pn)
+ "extract source" % pn, 4)
if not sync:
# Prepare for shutil.move later on
@@ -547,8 +581,14 @@ def _extract_source(srctree, keep_temp, devbranch, sync, d):
recipe_patches = [os.path.basename(patch) for patch in
oe.recipeutils.get_recipe_patches(crd)]
local_files = oe.recipeutils.get_recipe_local_files(crd)
+
+ # Ignore local files with subdir={BP}
+ srcabspath = os.path.abspath(srcsubdir)
local_files = [fname for fname in local_files if
- os.path.exists(os.path.join(workdir, fname))]
+ os.path.exists(os.path.join(workdir, fname)) and
+ (srcabspath == workdir or not
+ os.path.join(workdir, fname).startswith(srcabspath +
+ os.sep))]
if local_files:
for fname in local_files:
_move_file(os.path.join(workdir, fname),
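
The new filter above keeps a local file only if it actually exists in WORKDIR and does not resolve to a path inside the extracted source tree (which happens when SRC_URI entries use subdir= pointing under ${BP}). The condition, pulled out into a named predicate for clarity:

    import os

    def is_plain_local_file(workdir, srcabspath, fname):
        """True for files unpacked beside the source tree, not into it."""
        path = os.path.join(workdir, fname)
        if not os.path.exists(path):
            return False
        if srcabspath == workdir:
            return True                  # S == WORKDIR: nothing to exclude
        return not path.startswith(srcabspath + os.sep)
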
@@ -583,7 +623,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, d):
"doesn't use any source or the correct source "
"directory could not be determined" % pn)
- setup_git_repo(srcsubdir, crd.getVar('PV', True), devbranch)
+ setup_git_repo(srcsubdir, crd.getVar('PV', True), devbranch, d=d)
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
initial_rev = stdout.rstrip()
@@ -703,128 +743,127 @@ def modify(args, config, basepath, workspace):
args.recipename)
tinfoil = setup_tinfoil(basepath=basepath)
- rd = parse_recipe(config, tinfoil, args.recipename, True)
- if not rd:
- return 1
-
- pn = rd.getVar('PN', True)
- if pn != args.recipename:
- logger.info('Mapping %s to %s' % (args.recipename, pn))
- if pn in workspace:
- raise DevtoolError("recipe %s is already in your workspace" %
- pn)
-
- if args.srctree:
- srctree = os.path.abspath(args.srctree)
- else:
- srctree = get_default_srctree(config, pn)
-
- if args.no_extract and not os.path.isdir(srctree):
- raise DevtoolError("--no-extract specified and source path %s does "
- "not exist or is not a directory" %
- srctree)
- if not args.no_extract:
- tinfoil = _prep_extract_operation(config, basepath, pn, tinfoil)
- if not tinfoil:
- # Error already shown
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
return 1
- recipefile = rd.getVar('FILE', True)
- appendfile = recipe_to_append(recipefile, config, args.wildcard)
- if os.path.exists(appendfile):
- raise DevtoolError("Another variant of recipe %s is already in your "
- "workspace (only one variant of a recipe can "
- "currently be worked on at once)"
- % pn)
-
- _check_compatible_recipe(pn, rd)
+ pn = rd.getVar('PN', True)
+ if pn != args.recipename:
+ logger.info('Mapping %s to %s' % (args.recipename, pn))
+ if pn in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" %
+ pn)
- initial_rev = None
- commits = []
- if not args.no_extract:
- initial_rev = _extract_source(srctree, False, args.branch, False, rd)
- if not initial_rev:
- return 1
- logger.info('Source tree extracted to %s' % srctree)
- # Get list of commits since this revision
- (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
- commits = stdout.split()
- else:
- if os.path.exists(os.path.join(srctree, '.git')):
- # Check if it's a tree previously extracted by us
- try:
- (stdout, _) = bb.process.run('git branch --contains devtool-base', cwd=srctree)
- except bb.process.ExecutionError:
- stdout = ''
- for line in stdout.splitlines():
- if line.startswith('*'):
- (stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
- initial_rev = stdout.rstrip()
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ else:
+ srctree = get_default_srctree(config, pn)
+
+ if args.no_extract and not os.path.isdir(srctree):
+ raise DevtoolError("--no-extract specified and source path %s does "
+ "not exist or is not a directory" %
+ srctree)
+ if not args.no_extract:
+ tinfoil = _prep_extract_operation(config, basepath, pn, tinfoil)
+ if not tinfoil:
+ # Error already shown
+ return 1
+
+ recipefile = rd.getVar('FILE', True)
+ appendfile = recipe_to_append(recipefile, config, args.wildcard)
+ if os.path.exists(appendfile):
+ raise DevtoolError("Another variant of recipe %s is already in your "
+ "workspace (only one variant of a recipe can "
+ "currently be worked on at once)"
+ % pn)
+
+ _check_compatible_recipe(pn, rd)
+
+ initial_rev = None
+ commits = []
+ if not args.no_extract:
+ initial_rev = _extract_source(srctree, False, args.branch, False, rd)
if not initial_rev:
- # Otherwise, just grab the head revision
- (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
-
- # Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S', True))
- workdir = os.path.abspath(rd.getVar('WORKDIR', True))
- if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
- srctree = os.path.join(srctree, srcsubdir)
-
- bb.utils.mkdirhier(os.path.dirname(appendfile))
- with open(appendfile, 'w') as f:
- f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n')
- # Local files can be modified/tracked in separate subdir under srctree
- # Mostly useful for packages with S != WORKDIR
- f.write('FILESPATH_prepend := "%s:"\n' %
- os.path.join(srctree, 'oe-local-files'))
-
- f.write('\ninherit externalsrc\n')
- f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
- f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
-
- b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
- if b_is_s:
- f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
-
- if bb.data.inherits_class('kernel', rd):
- f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
- 'do_fetch do_unpack do_patch do_kernel_configme do_kernel_configcheck"\n')
- f.write('\ndo_configure_append() {\n'
- ' cp ${B}/.config ${S}/.config.baseline\n'
- ' ln -sfT ${B}/.config ${S}/.config.new\n'
- '}\n')
- if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
- for commit in commits:
- f.write('# commit: %s\n' % commit)
-
- _add_md5(config, pn, appendfile)
+ return 1
+ logger.info('Source tree extracted to %s' % srctree)
+ # Get list of commits since this revision
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
+ commits = stdout.split()
+ else:
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Check if it's a tree previously extracted by us
+ try:
+ (stdout, _) = bb.process.run('git branch --contains devtool-base', cwd=srctree)
+ except bb.process.ExecutionError:
+ stdout = ''
+ for line in stdout.splitlines():
+ if line.startswith('*'):
+ (stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
+ initial_rev = stdout.rstrip()
+ if not initial_rev:
+ # Otherwise, just grab the head revision
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ initial_rev = stdout.rstrip()
- logger.info('Recipe %s now set up to build from %s' % (pn, srctree))
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(rd.getVar('S', True))
+ workdir = os.path.abspath(rd.getVar('WORKDIR', True))
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree = os.path.join(srctree, srcsubdir)
+
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as f:
+ f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n')
+ # Local files can be modified/tracked in separate subdir under srctree
+ # Mostly useful for packages with S != WORKDIR
+ f.write('FILESPATH_prepend := "%s:"\n' %
+ os.path.join(srctree, 'oe-local-files'))
+
+ f.write('\ninherit externalsrc\n')
+ f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
+ f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+
+ b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+
+ if bb.data.inherits_class('kernel', rd):
+ f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
+ 'do_fetch do_unpack do_patch do_kernel_configme do_kernel_configcheck"\n')
+ f.write('\ndo_configure_append() {\n'
+ ' cp ${B}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ '}\n')
+ if initial_rev:
+ f.write('\n# initial_rev: %s\n' % initial_rev)
+ for commit in commits:
+ f.write('# commit: %s\n' % commit)
+
+ _add_md5(config, pn, appendfile)
+
+ logger.info('Recipe %s now set up to build from %s' % (pn, srctree))
- tinfoil.shutdown()
+ finally:
+ tinfoil.shutdown()
return 0
-def _get_patchset_revs(args, srctree, recipe_path):
+def _get_patchset_revs(srctree, recipe_path, initial_rev=None):
"""Get initial and update rev of a recipe. These are the start point of the
whole patchset and start point for the patches to be re-generated/updated.
"""
import bb
- if args.initial_rev:
- return args.initial_rev, args.initial_rev
-
- # Parse initial rev from recipe
+ # Parse initial rev from recipe if not specified
commits = []
- initial_rev = None
with open(recipe_path, 'r') as f:
for line in f:
if line.startswith('# initial_rev:'):
- initial_rev = line.split(':')[-1].strip()
+ if not initial_rev:
+ initial_rev = line.split(':')[-1].strip()
elif line.startswith('# commit:'):
commits.append(line.split(':')[-1].strip())
@@ -835,7 +874,7 @@ def _get_patchset_revs(args, srctree, recipe_path):
stdout, _ = bb.process.run('git rev-list --reverse %s..HEAD' %
initial_rev, cwd=srctree)
newcommits = stdout.split()
- for i in xrange(min(len(commits), len(newcommits))):
+ for i in range(min(len(commits), len(newcommits))):
if newcommits[i] == commits[i]:
update_rev = commits[i]
@@ -861,7 +900,7 @@ def _remove_file_entries(srcuri, filelist):
entries = []
for fname in filelist:
basename = os.path.basename(fname)
- for i in xrange(len(srcuri)):
+ for i in range(len(srcuri)):
if (srcuri[i].startswith('file://') and
os.path.basename(srcuri[i].split(';')[0]) == basename):
entries.append(srcuri[i])
@@ -870,10 +909,10 @@ def _remove_file_entries(srcuri, filelist):
break
return entries, remaining
-def _remove_source_files(args, files, destpath):
+def _remove_source_files(append, files, destpath):
"""Unlink existing patch files"""
for path in files:
- if args.append:
+ if append:
if not destpath:
raise Exception('destpath should be set here')
path = os.path.join(destpath, os.path.basename(path))
@@ -996,7 +1035,7 @@ def _export_local_files(srctree, rd, destdir):
bb.process.run(['git', 'checkout', tree, '--', '.'], cwd=srctree,
env=dict(os.environ, GIT_WORK_TREE=destdir,
GIT_INDEX_FILE=tmp_index))
- new_set = _git_ls_tree(srctree, tree, True).keys()
+ new_set = list(_git_ls_tree(srctree, tree, True).keys())
elif os.path.isdir(local_files_dir):
# If not tracked by Git, just copy from working copy
new_set = _ls_tree(os.path.join(srctree, 'oe-local-files'))
@@ -1026,15 +1065,47 @@ def _export_local_files(srctree, rd, destdir):
if new_set is not None:
for fname in new_set:
if fname in existing_files:
- updated[fname] = existing_files.pop(fname)
+ origpath = existing_files.pop(fname)
+ workpath = os.path.join(local_files_dir, fname)
+ if not filecmp.cmp(origpath, workpath):
+ updated[fname] = origpath
elif fname != '.gitignore':
added[fname] = None
+ workdir = rd.getVar('WORKDIR', True)
+ s = rd.getVar('S', True)
+ if not s.endswith(os.sep):
+ s += os.sep
+
+ if workdir != s:
+ # Handle files where subdir= was specified
+ for fname in list(existing_files.keys()):
+ # FIXME handle both subdir starting with BP and not?
+ fworkpath = os.path.join(workdir, fname)
+ if fworkpath.startswith(s):
+ fpath = os.path.join(srctree, os.path.relpath(fworkpath, s))
+ if os.path.exists(fpath):
+ origpath = existing_files.pop(fname)
+ if not filecmp.cmp(origpath, fpath):
+ updated[fpath] = origpath
+
removed = existing_files
return (updated, added, removed)
-def _update_recipe_srcrev(args, srctree, rd, config_data):
+def _determine_files_dir(rd):
+ """Determine the appropriate files directory for a recipe"""
+ recipedir = rd.getVar('FILE_DIRNAME', True)
+ for entry in rd.getVar('FILESPATH', True).split(':'):
+ relpth = os.path.relpath(entry, recipedir)
+ if os.sep not in relpth:
+ # One (or zero) levels below only, so we don't put anything in machine-specific directories
+ if os.path.isdir(entry):
+ return entry
+ return os.path.join(recipedir, rd.getVar('BPN', True))
+
+
+def _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remove):
"""Implement the 'srcrev' mode of update-recipe"""
import bb
import oe.recipeutils
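
_determine_files_dir replaces the hard-coded <recipedir>/<BPN> destination: it walks FILESPATH and picks the first existing entry at most one path component below the recipe directory, so new files land in files/ or <BPN>/ rather than a machine-specific subdirectory like <BPN>/qemux86 (two components down). The selection logic in isolation:

    import os

    def determine_files_dir(recipedir, filespath, bpn):
        """Pick the directory where new recipe source files should go."""
        for entry in filespath.split(':'):
            relpth = os.path.relpath(entry, recipedir)
            if os.sep not in relpth and os.path.isdir(entry):
                # Zero or one levels below recipedir and already present;
                # entries like <bpn>/qemux86 contain a separator, so machine
                # override directories are skipped
                return entry
        return os.path.join(recipedir, bpn)   # fall back to <recipedir>/<BPN>
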
@@ -1063,7 +1134,7 @@ def _update_recipe_srcrev(args, srctree, rd, config_data):
try:
local_files_dir = tempfile.mkdtemp(dir=tempdir)
upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir)
- if not args.no_remove:
+ if not no_remove:
# Find list of existing patches in recipe file
patches_dir = tempfile.mkdtemp(dir=tempdir)
old_srcrev = (rd.getVar('SRCREV', False) or '')
@@ -1071,29 +1142,33 @@ def _update_recipe_srcrev(args, srctree, rd, config_data):
patches_dir)
# Remove deleted local files and "overlapping" patches
- remove_files = del_f.values() + upd_p.values()
+ remove_files = list(del_f.values()) + list(upd_p.values())
if remove_files:
removedentries = _remove_file_entries(srcuri, remove_files)[0]
update_srcuri = True
- if args.append:
+ if appendlayerdir:
files = dict((os.path.join(local_files_dir, key), val) for
- key, val in upd_f.items() + new_f.items())
+ key, val in list(upd_f.items()) + list(new_f.items()))
removevalues = {}
if update_srcuri:
removevalues = {'SRC_URI': removedentries}
patchfields['SRC_URI'] = '\\\n '.join(srcuri)
_, destpath = oe.recipeutils.bbappend_recipe(
- rd, args.append, files, wildcardver=args.wildcard_version,
+ rd, appendlayerdir, files, wildcardver=wildcard_version,
extralines=patchfields, removevalues=removevalues)
else:
- files_dir = os.path.join(os.path.dirname(recipefile),
- rd.getVar('BPN', True))
- for basepath, path in upd_f.iteritems():
+ files_dir = _determine_files_dir(rd)
+ for basepath, path in upd_f.items():
logger.info('Updating file %s' % basepath)
- _move_file(os.path.join(local_files_dir, basepath), path)
+ if os.path.isabs(basepath):
+ # Original file (probably with subdir pointing inside source tree)
+ # so we do not want to move it, just copy
+ _copy_file(basepath, path)
+ else:
+ _move_file(os.path.join(local_files_dir, basepath), path)
update_srcuri = True
- for basepath, path in new_f.iteritems():
+ for basepath, path in new_f.items():
logger.info('Adding new file %s' % basepath)
_move_file(os.path.join(local_files_dir, basepath),
os.path.join(files_dir, basepath))
@@ -1109,21 +1184,21 @@ def _update_recipe_srcrev(args, srctree, rd, config_data):
'point to a git repository where you have pushed your '
'changes')
- _remove_source_files(args, remove_files, destpath)
+ _remove_source_files(appendlayerdir, remove_files, destpath)
return True
-def _update_recipe_patch(args, config, workspace, srctree, rd, config_data):
+def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, initial_rev):
"""Implement the 'patch' mode of update-recipe"""
import bb
import oe.recipeutils
recipefile = rd.getVar('FILE', True)
- append = workspace[args.recipename]['bbappend']
+ append = workspace[recipename]['bbappend']
if not os.path.exists(append):
raise DevtoolError('unable to find workspace bbappend for recipe %s' %
- args.recipename)
+ recipename)
- initial_rev, update_rev, changed_revs = _get_patchset_revs(args, srctree, append)
+ initial_rev, update_rev, changed_revs = _get_patchset_revs(srctree, append, initial_rev)
if not initial_rev:
raise DevtoolError('Unable to find initial revision - please specify '
'it with --initial-rev')
@@ -1134,13 +1209,13 @@ def _update_recipe_patch(args, config, workspace, srctree, rd, config_data):
upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir)
remove_files = []
- if not args.no_remove:
+ if not no_remove:
# Get all patches from source tree and check if any should be removed
all_patches_dir = tempfile.mkdtemp(dir=tempdir)
upd_p, new_p, del_p = _export_patches(srctree, rd, initial_rev,
all_patches_dir)
# Remove deleted local files and patches
- remove_files = del_f.values() + del_p.values()
+ remove_files = list(del_f.values()) + list(del_p.values())
# Get updated patches from source tree
patches_dir = tempfile.mkdtemp(dir=tempdir)
@@ -1150,11 +1225,11 @@ def _update_recipe_patch(args, config, workspace, srctree, rd, config_data):
updaterecipe = False
destpath = None
srcuri = (rd.getVar('SRC_URI', False) or '').split()
- if args.append:
+ if appendlayerdir:
files = dict((os.path.join(local_files_dir, key), val) for
- key, val in upd_f.items() + new_f.items())
+ key, val in list(upd_f.items()) + list(new_f.items()))
files.update(dict((os.path.join(patches_dir, key), val) for
- key, val in upd_p.items() + new_p.items()))
+ key, val in list(upd_p.items()) + list(new_p.items())))
if files or remove_files:
removevalues = None
if remove_files:
@@ -1165,17 +1240,23 @@ def _update_recipe_patch(args, config, workspace, srctree, rd, config_data):
item in remaining]
removevalues = {'SRC_URI': removedentries + remaining}
_, destpath = oe.recipeutils.bbappend_recipe(
- rd, args.append, files,
+ rd, appendlayerdir, files,
+ wildcardver=wildcard_version,
removevalues=removevalues)
else:
logger.info('No patches or local source files needed updating')
else:
# Update existing files
- for basepath, path in upd_f.iteritems():
+ for basepath, path in upd_f.items():
logger.info('Updating file %s' % basepath)
- _move_file(os.path.join(local_files_dir, basepath), path)
+ if os.path.isabs(basepath):
+ # Original file (probably with subdir pointing inside source tree)
+ # so we do not want to move it, just copy
+ _copy_file(basepath, path)
+ else:
+ _move_file(os.path.join(local_files_dir, basepath), path)
updatefiles = True
- for basepath, path in upd_p.iteritems():
+ for basepath, path in upd_p.items():
patchfn = os.path.join(patches_dir, basepath)
if changed_revs is not None:
# Avoid updating patches that have not actually changed
@@ -1188,15 +1269,14 @@ def _update_recipe_patch(args, config, workspace, srctree, rd, config_data):
_move_file(patchfn, path)
updatefiles = True
# Add any new files
- files_dir = os.path.join(os.path.dirname(recipefile),
- rd.getVar('BPN', True))
- for basepath, path in new_f.iteritems():
+ files_dir = _determine_files_dir(rd)
+ for basepath, path in new_f.items():
logger.info('Adding new file %s' % basepath)
_move_file(os.path.join(local_files_dir, basepath),
os.path.join(files_dir, basepath))
srcuri.append('file://%s' % basepath)
updaterecipe = True
- for basepath, path in new_p.iteritems():
+ for basepath, path in new_p.items():
logger.info('Adding new patch %s' % basepath)
_move_file(os.path.join(patches_dir, basepath),
os.path.join(files_dir, basepath))
@@ -1216,7 +1296,7 @@ def _update_recipe_patch(args, config, workspace, srctree, rd, config_data):
finally:
shutil.rmtree(tempdir)
- _remove_source_files(args, remove_files, destpath)
+ _remove_source_files(appendlayerdir, remove_files, destpath)
return True
def _guess_recipe_update_mode(srctree, rdata):
@@ -1241,6 +1321,19 @@ def _guess_recipe_update_mode(srctree, rdata):
return 'patch'
+def _update_recipe(recipename, workspace, rd, mode, appendlayerdir, wildcard_version, no_remove, initial_rev):
+ srctree = workspace[recipename]['srctree']
+ if mode == 'auto':
+ mode = _guess_recipe_update_mode(srctree, rd)
+
+ if mode == 'srcrev':
+ updated = _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remove)
+ elif mode == 'patch':
+ updated = _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, initial_rev)
+ else:
+ raise DevtoolError('update_recipe: invalid mode %s' % mode)
+ return updated
+
def update_recipe(args, config, basepath, workspace):
"""Entry point for the devtool 'update-recipe' subcommand"""
check_workspace_recipe(workspace, args.recipename)
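
_update_recipe is the new shared dispatcher used both here and by 'finish': mode 'auto' is resolved per recipe before choosing the srcrev or patch implementation. A simplified stand-in for that guess (the real _guess_recipe_update_mode inspects the recipe's SRC_URI and source tree):

    def guess_update_mode(srcuri):
        """Git-fetched recipes bump SRCREV; everything else carries patches."""
        if any(u.startswith(('git://', 'gitsm://')) for u in srcuri.split()):
            return 'srcrev'
        return 'patch'

    assert guess_update_mode('git://example.com/repo;branch=master') == 'srcrev'
    assert guess_update_mode('http://example.com/foo-1.0.tar.gz') == 'patch'
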
@@ -1254,28 +1347,20 @@ def update_recipe(args, config, basepath, workspace):
'destination layer "%s"' % args.append)
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
- rd = parse_recipe(config, tinfoil, args.recipename, True)
- if not rd:
- return 1
-
- srctree = workspace[args.recipename]['srctree']
- if args.mode == 'auto':
- mode = _guess_recipe_update_mode(srctree, rd)
- else:
- mode = args.mode
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
- if mode == 'srcrev':
- updated = _update_recipe_srcrev(args, srctree, rd, tinfoil.config_data)
- elif mode == 'patch':
- updated = _update_recipe_patch(args, config, workspace, srctree, rd, tinfoil.config_data)
- else:
- raise DevtoolError('update_recipe: invalid mode %s' % mode)
+ updated = _update_recipe(args.recipename, workspace, rd, args.mode, args.append, args.wildcard_version, args.no_remove, args.initial_rev)
- if updated:
- rf = rd.getVar('FILE', True)
- if rf.startswith(config.workspace_path):
- logger.warn('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
+ if updated:
+ rf = rd.getVar('FILE', True)
+ if rf.startswith(config.workspace_path):
+ logger.warn('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
+ finally:
+ tinfoil.shutdown()
return 0
@@ -1283,7 +1368,7 @@ def update_recipe(args, config, basepath, workspace):
def status(args, config, basepath, workspace):
"""Entry point for the devtool 'status' subcommand"""
if workspace:
- for recipe, value in workspace.iteritems():
+ for recipe, value in workspace.items():
recipefile = value['recipefile']
if recipefile:
recipestr = ' (%s)' % recipefile
@@ -1295,23 +1380,10 @@ def status(args, config, basepath, workspace):
return 0
-def reset(args, config, basepath, workspace):
- """Entry point for the devtool 'reset' subcommand"""
- import bb
- if args.recipename:
- if args.all:
- raise DevtoolError("Recipe cannot be specified if -a/--all is used")
- else:
- check_workspace_recipe(workspace, args.recipename, checksrc=False)
- elif not args.all:
- raise DevtoolError("Recipe must be specified, or specify -a/--all to "
- "reset all recipes")
- if args.all:
- recipes = workspace.keys()
- else:
- recipes = [args.recipename]
+def _reset(recipes, no_clean, config, basepath, workspace):
+ """Reset one or more recipes"""
- if recipes and not args.no_clean:
+ if recipes and not no_clean:
if len(recipes) == 1:
logger.info('Cleaning sysroot for recipe %s...' % recipes[0])
else:
@@ -1323,7 +1395,7 @@ def reset(args, config, basepath, workspace):
for recipe in recipes:
targets.append(recipe)
recipefile = workspace[recipe]['recipefile']
- if recipefile:
+ if recipefile and os.path.exists(recipefile):
targets.extend(get_bbclassextend_targets(recipefile, recipe))
try:
exec_build_env_command(config.init_path, basepath, 'bitbake -c clean %s' % ' '.join(targets))
@@ -1363,6 +1435,126 @@ def reset(args, config, basepath, workspace):
# This is unlikely, but if it's empty we can just remove it
os.rmdir(srctree)
+
+def reset(args, config, basepath, workspace):
+ """Entry point for the devtool 'reset' subcommand"""
+ import bb
+ if args.recipename:
+ if args.all:
+ raise DevtoolError("Recipe cannot be specified if -a/--all is used")
+ else:
+ for recipe in args.recipename:
+ check_workspace_recipe(workspace, recipe, checksrc=False)
+ elif not args.all:
+ raise DevtoolError("Recipe must be specified, or specify -a/--all to "
+ "reset all recipes")
+ if args.all:
+ recipes = list(workspace.keys())
+ else:
+ recipes = args.recipename
+
+ _reset(recipes, args.no_clean, config, basepath, workspace)
+
+ return 0
+
+
+def _get_layer(layername, d):
+ """Determine the base layer path for the specified layer name/path"""
+ layerdirs = d.getVar('BBLAYERS', True).split()
+ layers = {os.path.basename(p): p for p in layerdirs}
+ # Provide some shortcuts
+ if layername.lower() in ['oe-core', 'openembedded-core']:
+ layerdir = layers.get('meta', None)
+ else:
+ layerdir = layers.get(layername, None)
+ if layerdir:
+ layerdir = os.path.abspath(layerdir)
+ return layerdir or layername
+
+def finish(args, config, basepath, workspace):
+ """Entry point for the devtool 'finish' subcommand"""
+ import bb
+ import oe.recipeutils
+
+ check_workspace_recipe(workspace, args.recipename)
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ destlayerdir = _get_layer(args.destination, tinfoil.config_data)
+ origlayerdir = oe.recipeutils.find_layerdir(rd.getVar('FILE', True))
+
+ if not os.path.isdir(destlayerdir):
+ raise DevtoolError('Unable to find layer or directory matching "%s"' % args.destination)
+
+ if os.path.abspath(destlayerdir) == config.workspace_path:
+ raise DevtoolError('"%s" specifies the workspace layer - that is not a valid destination' % args.destination)
+
+ # If it's an upgrade, grab the original path
+ origpath = None
+ origfilelist = None
+ append = workspace[args.recipename]['bbappend']
+ with open(append, 'r') as f:
+ for line in f:
+ if line.startswith('# original_path:'):
+ origpath = line.split(':')[1].strip()
+ elif line.startswith('# original_files:'):
+ origfilelist = line.split(':')[1].split()
+
+ if origlayerdir == config.workspace_path:
+ # Recipe file itself is in workspace, update it there first
+ appendlayerdir = None
+ origrelpath = None
+ if origpath:
+ origlayerpath = oe.recipeutils.find_layerdir(origpath)
+ if origlayerpath:
+ origrelpath = os.path.relpath(origpath, origlayerpath)
+ destpath = oe.recipeutils.get_bbfile_path(rd, destlayerdir, origrelpath)
+ if not destpath:
+ raise DevtoolError("Unable to determine destination layer path - check that %s specifies an actual layer and %s/conf/layer.conf specifies BBFILES. You may also need to specify a more complete path." % (args.destination, destlayerdir))
+ elif destlayerdir == origlayerdir:
+ # Same layer, update the original recipe
+ appendlayerdir = None
+ destpath = None
+ else:
+ # Create/update a bbappend in the specified layer
+ appendlayerdir = destlayerdir
+ destpath = None
+
+ # Remove any old files in the case of an upgrade
+ if origpath and origfilelist and oe.recipeutils.find_layerdir(origpath) == oe.recipeutils.find_layerdir(destlayerdir):
+ for fn in origfilelist:
+ fnp = os.path.join(origpath, fn)
+ try:
+ os.remove(fnp)
+ except FileNotFoundError:
+ pass
+
+ # Actually update the recipe / bbappend
+ _update_recipe(args.recipename, workspace, rd, args.mode, appendlayerdir, wildcard_version=True, no_remove=False, initial_rev=args.initial_rev)
+
+ if origlayerdir == config.workspace_path and destpath:
+ # Recipe file itself is in the workspace - need to move it and any
+ # associated files to the specified layer
+ logger.info('Moving recipe file to %s' % destpath)
+ recipedir = os.path.dirname(rd.getVar('FILE', True))
+ for root, _, files in os.walk(recipedir):
+ for fn in files:
+ srcpath = os.path.join(root, fn)
+ relpth = os.path.relpath(os.path.dirname(srcpath), recipedir)
+ destdir = os.path.abspath(os.path.join(destpath, relpth))
+ bb.utils.mkdirhier(destdir)
+ shutil.move(srcpath, os.path.join(destdir, fn))
+
+ finally:
+ tinfoil.shutdown()
+
+ # Everything else has succeeded, we can now reset
+ _reset([args.recipename], no_clean=False, config=config, basepath=basepath, workspace=workspace)
+
return 0
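
finish resolves its destination argument against BBLAYERS by basename, with 'oe-core'/'openembedded-core' aliased to the 'meta' layer, falling back to treating the argument as a path. A self-contained sketch of that lookup (the BBLAYERS value below is an invented example):

    import os

    def get_layer(layername, bblayers):
        """Map a layer name to its path, falling back to the name itself."""
        layers = {os.path.basename(p): p for p in bblayers.split()}
        if layername.lower() in ('oe-core', 'openembedded-core'):
            layerdir = layers.get('meta')       # alias for openembedded-core
        else:
            layerdir = layers.get(layername)
        return os.path.abspath(layerdir) if layerdir else layername

    bblayers = '/home/user/poky/meta /home/user/meta-mylayer'
    assert get_layer('oe-core', bblayers) == '/home/user/poky/meta'
    assert get_layer('meta-mylayer', bblayers) == '/home/user/meta-mylayer'
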
@@ -1390,10 +1582,11 @@ def register_commands(subparsers, context):
parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
+ parser_add.add_argument('--autorev', '-a', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
parser_add.add_argument('--binary', '-b', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure). Useful with binary packages e.g. RPMs.', action='store_true')
parser_add.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
parser_add.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
- parser_add.set_defaults(func=add)
+ parser_add.set_defaults(func=add, fixed_setup=context.fixed_setup)
parser_modify = subparsers.add_parser('modify', help='Modify the source for an existing recipe',
description='Sets up the build environment to modify the source for an existing recipe. The default behaviour is to extract the source being fetched by the recipe into a git tree so you can work on it; alternatively if you already have your own pre-prepared source tree you can specify -n/--no-extract.',
@@ -1446,9 +1639,18 @@ def register_commands(subparsers, context):
parser_status.set_defaults(func=status)
parser_reset = subparsers.add_parser('reset', help='Remove a recipe from your workspace',
- description='Removes the specified recipe from your workspace (resetting its state)',
+ description='Removes the specified recipe(s) from your workspace (resetting its state back to that defined by the metadata).',
group='working', order=-100)
- parser_reset.add_argument('recipename', nargs='?', help='Recipe to reset')
+ parser_reset.add_argument('recipename', nargs='*', help='Recipe to reset')
parser_reset.add_argument('--all', '-a', action="store_true", help='Reset all recipes (clear workspace)')
parser_reset.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
parser_reset.set_defaults(func=reset)
+
+ parser_finish = subparsers.add_parser('finish', help='Finish working on a recipe in your workspace',
+ description='Pushes any committed changes to the specified recipe to the specified layer and removes it from your workspace. Roughly equivalent to an update-recipe followed by reset, except the update-recipe step will do the "right thing" depending on the recipe and the destination layer specified.',
+ group='working', order=-100)
+ parser_finish.add_argument('recipename', help='Recipe to finish')
+ parser_finish.add_argument('destination', help='Layer/path to put recipe into. Can be the name of a layer configured in your bblayers.conf, the path to the base of a layer, or a partial path inside a layer. %(prog)s will attempt to complete the path based on the layer\'s structure.')
+ parser_finish.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
+ parser_finish.add_argument('--initial-rev', help='Override starting revision for patches')
+ parser_finish.set_defaults(func=finish)
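
With this registration in place, a typical session ends with something like "devtool finish mypackage meta-mylayer": committed changes are written back into meta-mylayer as patches or a SRCREV bump, the recipe file is moved out of the workspace if it lives there, and the workspace entry is then reset.
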
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py b/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py
index a085f78c4..a4239f1cd 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py
@@ -70,18 +70,26 @@ def _remove_patch_dirs(recipefolder):
def _recipe_contains(rd, var):
rf = rd.getVar('FILE', True)
varfiles = oe.recipeutils.get_var_files(rf, [var], rd)
- for var, fn in varfiles.iteritems():
+ for var, fn in varfiles.items():
if fn and fn.startswith(os.path.dirname(rf) + os.sep):
return True
return False
def _rename_recipe_dirs(oldpv, newpv, path):
for root, dirs, files in os.walk(path):
+ # Rename directories with the version in their name
for olddir in dirs:
if olddir.find(oldpv) != -1:
newdir = olddir.replace(oldpv, newpv)
if olddir != newdir:
shutil.move(os.path.join(path, olddir), os.path.join(path, newdir))
+ # Rename any inc files with the version in their name (unusual, but possible)
+ for oldfile in files:
+ if oldfile.endswith('.inc'):
+ if oldfile.find(oldpv) != -1:
+ newfile = oldfile.replace(oldpv, newpv)
+ if oldfile != newfile:
+ os.rename(os.path.join(path, oldfile), os.path.join(path, newfile))
def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
oldrecipe = os.path.basename(oldrecipe)
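
The upgrade path now renames not just versioned directories but also any versioned .inc files copied into the workspace. A reduced sketch of the rename walk that handles both in one pass (the real code restricts file renames to .inc files):

    import os
    import shutil

    def rename_versioned(path, oldpv, newpv):
        """Rename direct children of path that embed the old version string."""
        for name in os.listdir(path):
            if oldpv not in name:
                continue
            newname = name.replace(oldpv, newpv)
            if newname != name:
                # shutil.move handles both files and directories
                shutil.move(os.path.join(path, name),
                            os.path.join(path, newname))
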
@@ -97,7 +105,7 @@ def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
_rename_recipe_dirs(oldpv, newpv, path)
return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
-def _write_append(rc, srctree, same_dir, no_same_dir, rev, workspace, d):
+def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
"""Writes an append file"""
if not os.path.exists(rc):
raise DevtoolError("bbappend not created because %s does not exist" % rc)
@@ -120,8 +128,12 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, workspace, d):
b_is_s = use_external_build(same_dir, no_same_dir, d)
if b_is_s:
f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('\n')
if rev:
- f.write('\n# initial_rev: %s\n' % rev)
+ f.write('# initial_rev: %s\n' % rev)
+ if copied:
+ f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE', True)))
+ f.write('# original_files: %s\n' % ' '.join(copied))
return af
def _cleanup_on_error(rf, srctree):
@@ -215,7 +227,9 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
for f in stdout.splitlines():
__run('git add "%s"' % f)
- __run('git commit -q -m "Commit of upstream changes at version %s" --allow-empty' % newpv)
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
+ __run('git %s commit -q -m "Commit of upstream changes at version %s" --allow-empty' % (' '.join(useroptions), newpv))
__run('git tag -f devtool-base-%s' % newpv)
(stdout, _) = __run('git rev-parse HEAD')
@@ -228,16 +242,22 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, branch, keep_temp, tin
for patch in patches:
logger.warn("%s" % os.path.basename(patch))
else:
+ __run('git checkout devtool-patched -b %s' % branch)
+ skiptag = False
try:
- __run('git checkout devtool-patched -b %s' % branch)
__run('git rebase %s' % rev)
+ except bb.process.ExecutionError as e:
+ skiptag = True
+ if 'conflict' in e.stdout:
+ logger.warn('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
+ else:
+ logger.warn('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+ if not skiptag:
if uri.startswith('git://'):
suffix = 'new'
else:
suffix = newpv
__run('git tag -f devtool-patched-%s' % suffix)
- except bb.process.ExecutionError as e:
- logger.warn('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
if tmpsrctree:
if keep_temp:
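
Instead of abandoning the tag step on any rebase failure, the new code distinguishes conflicts (the user must resolve them, so warn and skip tagging) from other errors. The same pattern as a standalone sketch, with subprocess standing in for bb.process:

    import subprocess

    def rebase_onto(srcdir, rev):
        """Rebase; return False if the user still has conflicts to resolve."""
        result = subprocess.run(['git', 'rebase', rev], cwd=srcdir,
                                capture_output=True, text=True)
        if result.returncode != 0:
            if 'conflict' in result.stdout.lower():
                print('Rebase stopped on conflicts; resolve them to '
                      'complete the upgrade:\n%s' % result.stdout.rstrip())
            else:
                print('git rebase failed:\n%s' % result.stdout)
            return False      # caller skips tagging the result
        return True
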
@@ -253,7 +273,7 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, workspace, tinfoil
bpn = rd.getVar('BPN', True)
path = os.path.join(workspace, 'recipes', bpn)
bb.utils.mkdirhier(path)
- oe.recipeutils.copy_recipe_files(rd, path)
+ copied, _ = oe.recipeutils.copy_recipe_files(rd, path)
oldpv = rd.getVar('PV', True)
if not newpv:
@@ -300,10 +320,10 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, workspace, tinfoil
newvalues['SRC_URI[md5sum]'] = md5
newvalues['SRC_URI[sha256sum]'] = sha256
- rd = oe.recipeutils.parse_recipe(fullpath, None, tinfoil.config_data)
+ rd = oe.recipeutils.parse_recipe(tinfoil.cooker, fullpath, None)
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
- return fullpath
+ return fullpath, copied
def upgrade(args, config, basepath, workspace):
"""Entry point for the devtool 'upgrade' subcommand"""
@@ -316,48 +336,51 @@ def upgrade(args, config, basepath, workspace):
raise DevtoolError("If you specify --srcbranch/-B then you must use --srcrev/-S to specify the revision" % args.recipename)
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
- rd = parse_recipe(config, tinfoil, args.recipename, True)
- if not rd:
- return 1
-
- pn = rd.getVar('PN', True)
- if pn != args.recipename:
- logger.info('Mapping %s to %s' % (args.recipename, pn))
- if pn in workspace:
- raise DevtoolError("recipe %s is already in your workspace" % pn)
-
- if args.srctree:
- srctree = os.path.abspath(args.srctree)
- else:
- srctree = standard.get_default_srctree(config, pn)
-
- standard._check_compatible_recipe(pn, rd)
- old_srcrev = rd.getVar('SRCREV', True)
- if old_srcrev == 'INVALID':
- old_srcrev = None
- if old_srcrev and not args.srcrev:
- raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
- if rd.getVar('PV', True) == args.version and old_srcrev == args.srcrev:
- raise DevtoolError("Current and upgrade versions are the same version")
-
- rf = None
try:
- rev1 = standard._extract_source(srctree, False, 'devtool-orig', False, rd)
- rev2, md5, sha256 = _extract_new_source(args.version, srctree, args.no_patch,
- args.srcrev, args.branch, args.keep_temp,
- tinfoil, rd)
- rf = _create_new_recipe(args.version, md5, sha256, args.srcrev, args.srcbranch, config.workspace_path, tinfoil, rd)
- except bb.process.CmdError as e:
- _upgrade_error(e, rf, srctree)
- except DevtoolError as e:
- _upgrade_error(e, rf, srctree)
- standard._add_md5(config, pn, os.path.dirname(rf))
-
- af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
- config.workspace_path, rd)
- standard._add_md5(config, pn, af)
- logger.info('Upgraded source extracted to %s' % srctree)
- logger.info('New recipe is %s' % rf)
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ pn = rd.getVar('PN', True)
+ if pn != args.recipename:
+ logger.info('Mapping %s to %s' % (args.recipename, pn))
+ if pn in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" % pn)
+
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ else:
+ srctree = standard.get_default_srctree(config, pn)
+
+ standard._check_compatible_recipe(pn, rd)
+ old_srcrev = rd.getVar('SRCREV', True)
+ if old_srcrev == 'INVALID':
+ old_srcrev = None
+ if old_srcrev and not args.srcrev:
+ raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
+ if rd.getVar('PV', True) == args.version and old_srcrev == args.srcrev:
+ raise DevtoolError("Current and upgrade versions are the same version")
+
+ rf = None
+ try:
+ rev1 = standard._extract_source(srctree, False, 'devtool-orig', False, rd)
+ rev2, md5, sha256 = _extract_new_source(args.version, srctree, args.no_patch,
+ args.srcrev, args.branch, args.keep_temp,
+ tinfoil, rd)
+ rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, args.srcbranch, config.workspace_path, tinfoil, rd)
+ except bb.process.CmdError as e:
+ _upgrade_error(e, rf, srctree)
+ except DevtoolError as e:
+ _upgrade_error(e, rf, srctree)
+ standard._add_md5(config, pn, os.path.dirname(rf))
+
+ af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
+ copied, config.workspace_path, rd)
+ standard._add_md5(config, pn, af)
+ logger.info('Upgraded source extracted to %s' % srctree)
+ logger.info('New recipe is %s' % rf)
+ finally:
+ tinfoil.shutdown()
return 0
def register_commands(subparsers, context):
@@ -371,7 +394,7 @@ def register_commands(subparsers, context):
parser_upgrade.add_argument('recipename', help='Name of recipe to upgrade (just name - no version, path or extension)')
parser_upgrade.add_argument('srctree', nargs='?', help='Path to where to extract the source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
parser_upgrade.add_argument('--version', '-V', help='Version to upgrade to (PV)')
- parser_upgrade.add_argument('--srcrev', '-S', help='Source revision to upgrade to (if fetching from an SCM such as git)')
+ parser_upgrade.add_argument('--srcrev', '-S', help='Source revision to upgrade to (required if fetching from an SCM such as git)')
parser_upgrade.add_argument('--srcbranch', '-B', help='Branch in source repository containing the revision to use (if fetching from an SCM such as git)')
parser_upgrade.add_argument('--branch', '-b', default="devtool", help='Name for new development branch to checkout (default "%(default)s")')
parser_upgrade.add_argument('--no-patch', action="store_true", help='Do not apply patches from the recipe to the new source code')
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/append.py b/import-layers/yocto-poky/scripts/lib/recipetool/append.py
index 558fd25ac..1e0fc1ee8 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/append.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/append.py
@@ -61,7 +61,7 @@ def find_target_file(targetpath, d, pkglist=None):
'/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
'${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname_pn-base-files = "value" in configuration',}
- for pthspec, message in invalidtargets.iteritems():
+ for pthspec, message in invalidtargets.items():
if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
raise InvalidTargetFileError(d.expand(message))
@@ -90,7 +90,7 @@ def find_target_file(targetpath, d, pkglist=None):
if fnmatch.fnmatchcase(fullpth, targetpath):
recipes[targetpath].append(pn)
elif line.startswith('pkg_preinst_') or line.startswith('pkg_postinst_'):
- scriptval = line.split(':', 1)[1].strip().decode('string_escape')
+ scriptval = line.split(':', 1)[1].strip().encode('utf-8').decode('unicode_escape')
if 'update-alternatives --install %s ' % targetpath in scriptval:
recipes[targetpath].append('?%s' % pn)
elif targetpath_re.search(scriptval):
@@ -115,8 +115,7 @@ def _parse_recipe(pn, tinfoil):
# Error already logged
return None
append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
- rd = oe.recipeutils.parse_recipe(recipefile, append_files,
- tinfoil.config_data)
+ rd = oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, append_files)
return rd
def determine_file_source(targetpath, rd):
@@ -152,7 +151,7 @@ def determine_file_source(targetpath, rd):
# Check patches
srcpatches = []
patchedfiles = oe.recipeutils.get_recipe_patched_files(rd)
- for patch, filelist in patchedfiles.iteritems():
+ for patch, filelist in patchedfiles.items():
for fileitem in filelist:
if fileitem[0] == srcpath:
srcpatches.append((patch, fileitem[1]))
@@ -172,7 +171,7 @@ def get_source_path(cmdelements):
"""Find the source path specified within a command"""
command = cmdelements[0]
if command in ['install', 'cp']:
- helptext = subprocess.check_output('LC_ALL=C %s --help' % command, shell=True)
+ helptext = subprocess.check_output('LC_ALL=C %s --help' % command, shell=True).decode('utf-8')
argopts = ''
argopt_line_re = re.compile('^-([a-zA-Z0-9]), --[a-z-]+=')
for line in helptext.splitlines():
@@ -270,7 +269,7 @@ def appendfile(args):
postinst_pns = []
selectpn = None
- for targetpath, pnlist in recipes.iteritems():
+ for targetpath, pnlist in recipes.items():
for pn in pnlist:
if pn.startswith('?'):
alternative_pns.append(pn[1:])
@@ -351,7 +350,7 @@ def appendsrc(args, files, rd, extralines=None):
copyfiles = {}
extralines = extralines or []
- for newfile, srcfile in files.iteritems():
+ for newfile, srcfile in files.items():
src_destdir = os.path.dirname(srcfile)
if not args.use_workdir:
if rd.getVar('S', True) == rd.getVar('STAGING_KERNEL_DIR', True):
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create.py b/import-layers/yocto-poky/scripts/lib/recipetool/create.py
index bb9fb9b04..d427d3206 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create.py
@@ -24,7 +24,7 @@ import re
import json
import logging
import scriptutils
-import urlparse
+from urllib.parse import urlparse, urldefrag, urlsplit
import hashlib
logger = logging.getLogger('recipetool')
@@ -61,8 +61,8 @@ class RecipeHandler(object):
libpaths = list(set([base_libdir, libdir]))
libname_re = re.compile('^lib(.+)\.so.*$')
pkglibmap = {}
- for lib, item in shlib_providers.iteritems():
- for path, pkg in item.iteritems():
+ for lib, item in shlib_providers.items():
+ for path, pkg in item.items():
if path in libpaths:
res = libname_re.match(lib)
if res:
@@ -74,7 +74,7 @@ class RecipeHandler(object):
# Now turn it into a library->recipe mapping
pkgdata_dir = d.getVar('PKGDATA_DIR', True)
- for libname, pkg in pkglibmap.iteritems():
+ for libname, pkg in pkglibmap.items():
try:
with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
for line in f:
@@ -167,7 +167,7 @@ class RecipeHandler(object):
unmappedpc = []
pcdeps = list(set(pcdeps))
for pcdep in pcdeps:
- if isinstance(pcdep, basestring):
+ if isinstance(pcdep, str):
recipe = recipemap.get(pcdep, None)
if recipe:
deps.append(recipe)
@@ -256,30 +256,56 @@ def validate_pv(pv):
def determine_from_filename(srcfile):
"""Determine name and version from a filename"""
- part = ''
+ if is_package(srcfile):
+ # Force getting the value from the package metadata
+ return None, None
+
if '.tar.' in srcfile:
- namepart = srcfile.split('.tar.')[0].lower()
+ namepart = srcfile.split('.tar.')[0]
else:
- namepart = os.path.splitext(srcfile)[0].lower()
- splitval = namepart.rsplit('_', 1)
+ namepart = os.path.splitext(srcfile)[0]
+ namepart = namepart.lower().replace('_', '-')
+ if namepart.endswith('.src'):
+ namepart = namepart[:-4]
+ if namepart.endswith('.orig'):
+ namepart = namepart[:-5]
+ splitval = namepart.split('-')
+ logger.debug('determine_from_filename: split name %s into: %s' % (srcfile, splitval))
+
+ ver_re = re.compile('^v?[0-9]')
+
+ pv = None
+ pn = None
if len(splitval) == 1:
- splitval = namepart.rsplit('-', 1)
- pn = splitval[0].replace('_', '-')
- if len(splitval) > 1:
- if splitval[1][0] in '0123456789':
- pv = splitval[1]
+ # Try to split the version out if there is no separator (or a .)
+ res = re.match('^([^0-9]+)([0-9.]+.*)$', namepart)
+ if res:
+ if len(res.group(1)) > 1 and len(res.group(2)) > 1:
+ pn = res.group(1).rstrip('.')
+ pv = res.group(2)
else:
- pn = '-'.join(splitval).replace('_', '-')
- pv = None
+ pn = namepart
else:
- pv = None
+ if splitval[-1] in ['source', 'src']:
+ splitval.pop()
+ if len(splitval) > 2 and re.match('^(alpha|beta|stable|release|rc[0-9]|pre[0-9]|p[0-9]|[0-9]{8})', splitval[-1]) and ver_re.match(splitval[-2]):
+ pv = '-'.join(splitval[-2:])
+ if pv.endswith('-release'):
+ pv = pv[:-8]
+ splitval = splitval[:-2]
+ elif ver_re.match(splitval[-1]):
+ pv = splitval.pop()
+ pn = '-'.join(splitval)
+ if pv and pv.startswith('v'):
+ pv = pv[1:]
+ logger.debug('determine_from_filename: name = "%s" version = "%s"' % (pn, pv))
return (pn, pv)
def determine_from_url(srcuri):
"""Determine name and version from a URL"""
pn = None
pv = None
- parseres = urlparse.urlparse(srcuri.lower().split(';', 1)[0])
+ parseres = urlparse(srcuri.lower().split(';', 1)[0])
if parseres.path:
if 'github.com' in parseres.netloc:
res = re.search(r'.*/(.*?)/archive/(.*)-final\.(tar|zip)', parseres.path)
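
The rewritten determine_from_filename above normalises the filename (lower-case, '_' to '-', strip trailing .src/.orig), then peels a trailing version off the '-'-separated parts, keeping suffixes like rc1 or beta attached to the version. Some inputs and the (name, version) pairs those rules yield:

    foo-1.0.tar.gz         -> ('foo', '1.0')
    foo_1.0.orig.tar.gz    -> ('foo', '1.0')
    foo-1.0-rc2.tar.xz     -> ('foo', '1.0-rc2')
    foo-v2.1-src.tar.bz2   -> ('foo', '2.1')
    libbar3.4.zip          -> ('libbar', '3.4')
    bar.ipk                -> (None, None)   # packages defer to their metadata
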
@@ -310,43 +336,66 @@ def supports_srcrev(uri):
# odd interactions with the urldata cache which lead to errors
localdata.setVar('SRCREV', '${AUTOREV}')
bb.data.update_data(localdata)
- fetcher = bb.fetch2.Fetch([uri], localdata)
- urldata = fetcher.ud
- for u in urldata:
- if urldata[u].method.supports_srcrev():
+ try:
+ fetcher = bb.fetch2.Fetch([uri], localdata)
+ urldata = fetcher.ud
+ for u in urldata:
+ if urldata[u].method.supports_srcrev():
+ return True
+ except bb.fetch2.FetchError as e:
+ logger.debug('FetchError in supports_srcrev: %s' % str(e))
+ # Fall back to basic check
+ if uri.startswith(('git://', 'gitsm://')):
return True
return False
def reformat_git_uri(uri):
'''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
checkuri = uri.split(';', 1)[0]
- if checkuri.endswith('.git') or '/git/' in checkuri:
- res = re.match('(https?)://([^;]+(\.git)?)(;.*)?$', uri)
+ if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
+ res = re.match('(http|https|ssh)://([^;]+(\.git)?)(;.*)?$', uri)
if res:
# Need to switch the URI around so that the git fetcher is used
return 'git://%s;protocol=%s%s' % (res.group(2), res.group(1), res.group(4) or '')
+ elif '@' in checkuri:
+ # Catch e.g. git@git.example.com:repo.git
+ return 'git://%s;protocol=ssh' % checkuri.replace(':', '/', 1)
return uri
+def is_package(url):
+ '''Check if a URL points to a package'''
+ checkurl = url.split(';', 1)[0]
+ if checkurl.endswith(('.deb', '.ipk', '.rpm', '.srpm')):
+ return True
+ return False
+
def create_recipe(args):
import bb.process
import tempfile
import shutil
+ import oe.recipeutils
pkgarch = ""
if args.machine:
pkgarch = "${MACHINE_ARCH}"
+ extravalues = {}
checksums = (None, None)
tempsrc = ''
+ source = args.source
srcsubdir = ''
srcrev = '${AUTOREV}'
- if '://' in args.source:
+
+ if os.path.isfile(source):
+ source = 'file://%s' % os.path.abspath(source)
+
+ if scriptutils.is_src_url(source):
# Fetch a URL
- fetchuri = reformat_git_uri(urlparse.urldefrag(args.source)[0])
+ fetchuri = reformat_git_uri(urldefrag(source)[0])
if args.binary:
# Assume the archive contains the directory structure verbatim
# so we need to extract to a subdirectory
- fetchuri += ';subdir=%s' % os.path.splitext(os.path.basename(urlparse.urlsplit(fetchuri).path))[0]
+ fetchuri += ';subdir=${BP}'
srcuri = fetchuri
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
@@ -357,10 +406,7 @@ def create_recipe(args):
srctree = tempsrc
if fetchuri.startswith('npm://'):
# Check if npm is available
- npm = bb.utils.which(tinfoil.config_data.getVar('PATH', True), 'npm')
- if not npm:
- logger.error('npm:// URL requested but npm is not available - you need to either build nodejs-native or install npm using your package manager')
- sys.exit(1)
+ check_npm(tinfoil.config_data)
logger.info('Fetching %s...' % srcuri)
try:
checksums = scriptutils.fetch_uri(tinfoil.config_data, fetchuri, srctree, srcrev)
@@ -377,19 +423,50 @@ def create_recipe(args):
srcsubdir = dirlist[0]
srctree = os.path.join(srctree, srcsubdir)
else:
- with open(singleitem, 'r') as f:
+ with open(singleitem, 'r', errors='surrogateescape') as f:
if '<html' in f.read(100).lower():
logger.error('Fetching "%s" returned a single HTML page - check the URL is correct and functional' % fetchuri)
sys.exit(1)
+ if os.path.exists(os.path.join(srctree, '.gitmodules')) and srcuri.startswith('git://'):
+ srcuri = 'gitsm://' + srcuri[6:]
+ logger.info('Fetching submodules...')
+ bb.process.run('git submodule update --init --recursive', cwd=srctree)
+
+ if is_package(fetchuri):
+ tmpfdir = tempfile.mkdtemp(prefix='recipetool-')
+ try:
+ pkgfile = None
+ try:
+ fileuri = fetchuri + ';unpack=0'
+ scriptutils.fetch_uri(tinfoil.config_data, fileuri, tmpfdir, srcrev)
+ for root, _, files in os.walk(tmpfdir):
+ for f in files:
+ pkgfile = os.path.join(root, f)
+ break
+ except bb.fetch2.BBFetchException as e:
+ logger.warn('Second fetch to get metadata failed: %s' % str(e).rstrip())
+
+ if pkgfile:
+ if pkgfile.endswith(('.deb', '.ipk')):
+ stdout, _ = bb.process.run('ar x %s control.tar.gz' % pkgfile, cwd=tmpfdir)
+ stdout, _ = bb.process.run('tar xf control.tar.gz ./control', cwd=tmpfdir)
+ values = convert_debian(tmpfdir)
+ extravalues.update(values)
+ elif pkgfile.endswith(('.rpm', '.srpm')):
+ stdout, _ = bb.process.run('rpm -qp --xml %s > pkginfo.xml' % pkgfile, cwd=tmpfdir)
+ values = convert_rpm_xml(os.path.join(tmpfdir, 'pkginfo.xml'))
+ extravalues.update(values)
+ finally:
+ shutil.rmtree(tmpfdir)
else:
# Assume we're pointing to an existing source tree
if args.extract_to:
logger.error('--extract-to cannot be specified if source is a directory')
sys.exit(1)
- if not os.path.isdir(args.source):
- logger.error('Invalid source directory %s' % args.source)
+ if not os.path.isdir(source):
+ logger.error('Invalid source directory %s' % source)
sys.exit(1)
- srctree = args.source
+ srctree = source
srcuri = ''
if os.path.exists(os.path.join(srctree, '.git')):
# Try to get upstream repo location from origin remote
@@ -401,7 +478,7 @@ def create_recipe(args):
for line in stdout.splitlines():
splitline = line.split()
if len(splitline) > 1:
- if splitline[0] == 'origin' and '://' in splitline[1]:
+ if splitline[0] == 'origin' and scriptutils.is_src_url(splitline[1]):
srcuri = reformat_git_uri(splitline[1])
srcsubdir = 'git'
break
@@ -429,37 +506,12 @@ def create_recipe(args):
lines_before.append('# Recipe created by %s' % os.path.basename(sys.argv[0]))
lines_before.append('# This is the basis of a recipe and may need further editing in order to be fully functional.')
lines_before.append('# (Feel free to remove these comments when editing.)')
- lines_before.append('#')
-
- licvalues = guess_license(srctree_use)
- lic_files_chksum = []
- if licvalues:
- licenses = []
- for licvalue in licvalues:
- if not licvalue[0] in licenses:
- licenses.append(licvalue[0])
- lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
- lines_before.append('# WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is')
- lines_before.append('# your responsibility to verify that the values are complete and correct.')
- if len(licvalues) > 1:
- lines_before.append('#')
- lines_before.append('# NOTE: multiple licenses have been detected; if that is correct you should separate')
- lines_before.append('# these in the LICENSE value using & if the multiple licenses all apply, or | if there')
- lines_before.append('# is a choice between the multiple licenses. If in doubt, check the accompanying')
- lines_before.append('# documentation to determine which situation is applicable.')
- else:
- lines_before.append('# Unable to find any files that looked like license statements. Check the accompanying')
- lines_before.append('# documentation and source headers and set LICENSE and LIC_FILES_CHKSUM accordingly.')
- lines_before.append('#')
- lines_before.append('# NOTE: LICENSE is being set to "CLOSED" to allow you to at least start building - if')
- lines_before.append('# this is not accurate with respect to the licensing of the software being built (it')
- lines_before.append('# will not be in most cases) you must specify the correct value before using this')
- lines_before.append('# recipe for anything other than initial testing/development!')
- licenses = ['CLOSED']
- lines_before.append('LICENSE = "%s"' % ' '.join(licenses))
- lines_before.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
+ # We need a blank line here so that patch_recipe_lines can rewind before the LICENSE comments
lines_before.append('')
+ handled = []
+ licvalues = handle_license_vars(srctree_use, lines_before, handled, extravalues, tinfoil.config_data)
+
classes = []
# FIXME This is kind of a hack, we probably ought to be using bitbake to do this
@@ -515,10 +567,16 @@ def create_recipe(args):
lines_before.append('')
lines_before.append('# Modify these as desired')
lines_before.append('PV = "%s+git${SRCPV}"' % (realpv or '1.0'))
+ if not args.autorev and srcrev == '${AUTOREV}':
+ if os.path.exists(os.path.join(srctree, '.git')):
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ srcrev = stdout.rstrip()
lines_before.append('SRCREV = "%s"' % srcrev)
lines_before.append('')
- if srcsubdir:
+ if srcsubdir and not args.binary:
+ # (for binary packages we explicitly specify subdir= when fetching to
+ # match the default value of S, so we don't need to set it in that case)
lines_before.append('S = "${WORKDIR}/%s"' % srcsubdir)
lines_before.append('')
@@ -549,40 +607,36 @@ def create_recipe(args):
handlers = [item[0] for item in handlers]
# Apply the handlers
- handled = []
- handled.append(('license', licvalues))
-
if args.binary:
classes.append('bin_package')
handled.append('buildsystem')
- extravalues = {}
for handler in handlers:
handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
extrafiles = extravalues.pop('extrafiles', {})
+ extra_pn = extravalues.pop('PN', None)
+ extra_pv = extravalues.pop('PV', None)
- if not realpv:
- realpv = extravalues.get('PV', None)
- if realpv:
- if not validate_pv(realpv):
- realpv = None
- else:
- realpv = realpv.lower().split()[0]
- if '_' in realpv:
- realpv = realpv.replace('_', '-')
- if not pn:
- pn = extravalues.get('PN', None)
- if pn:
- if pn.startswith('GNU '):
- pn = pn[4:]
- if ' ' in pn:
- # Probably a descriptive identifier rather than a proper name
- pn = None
- else:
- pn = pn.lower()
- if '_' in pn:
- pn = pn.replace('_', '-')
+ if extra_pv and not realpv:
+ realpv = extra_pv
+ if not validate_pv(realpv):
+ realpv = None
+ else:
+ realpv = realpv.lower().split()[0]
+ if '_' in realpv:
+ realpv = realpv.replace('_', '-')
+ if extra_pn and not pn:
+ pn = extra_pn
+ if pn.startswith('GNU '):
+ pn = pn[4:]
+ if ' ' in pn:
+ # Probably a descriptive identifier rather than a proper name
+ pn = None
+ else:
+ pn = pn.lower()
+ if '_' in pn:
+ pn = pn.replace('_', '-')
if not outfile:
if not pn:
@@ -590,8 +644,11 @@ def create_recipe(args):
# devtool looks for this specific exit code, so don't change it
sys.exit(15)
else:
- if srcuri and srcuri.startswith(('git://', 'hg://', 'svn://')):
- outfile = '%s_%s.bb' % (pn, srcuri.split(':', 1)[0])
+ if srcuri and srcuri.startswith(('gitsm://', 'git://', 'hg://', 'svn://')):
+ suffix = srcuri.split(':', 1)[0]
+ if suffix == 'gitsm':
+ suffix = 'git'
+ outfile = '%s_%s.bb' % (pn, suffix)
elif realpv:
outfile = '%s_%s.bb' % (pn, realpv)
else:
@@ -610,7 +667,7 @@ def create_recipe(args):
else:
extraoutdir = os.path.join(os.path.dirname(outfile), pn)
bb.utils.mkdirhier(extraoutdir)
- for destfn, extrafile in extrafiles.iteritems():
+ for destfn, extrafile in extrafiles.items():
shutil.move(extrafile, os.path.join(extraoutdir, destfn))
lines = lines_before
@@ -662,6 +719,12 @@ def create_recipe(args):
outlines.append('')
outlines.extend(lines_after)
+ if extravalues:
+ if 'LICENSE' in extravalues and not licvalues:
+ # Don't blow away 'CLOSED' value that comments say we set
+ del extravalues['LICENSE']
+ _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=False)
+
if args.extract_to:
scriptutils.git_convert_standalone_clone(srctree)
if os.path.isdir(args.extract_to):
@@ -679,14 +742,72 @@ def create_recipe(args):
sys.stdout.write('\n'.join(outlines) + '\n')
else:
with open(outfile, 'w') as f:
- f.write('\n'.join(outlines) + '\n')
+ lastline = None
+ for line in outlines:
+ if not lastline and not line:
+ # Skip extra blank lines
+ continue
+ f.write('%s\n' % line)
+ lastline = line
logger.info('Recipe %s has been created; further editing may be required to make it fully functional' % outfile)
if tempsrc:
- shutil.rmtree(tempsrc)
+ if args.keep_temp:
+ logger.info('Preserving temporary directory %s' % tempsrc)
+ else:
+ shutil.rmtree(tempsrc)
return 0
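End to end, for a git source the function now typically writes a skeleton along these lines (an illustrative sketch with placeholder values, not verbatim output):

    # libfoo_git.bb (sketch):
    #   LICENSE = "CLOSED"
    #   LIC_FILES_CHKSUM = ""
    #   SRC_URI = "git://github.com/user/libfoo;protocol=https"
    #   PV = "1.0+git${SRCPV}"
    #   SRCREV = "f0e1d2..."   # pinned via 'git rev-parse HEAD' unless -a/--autorev
    #   S = "${WORKDIR}/git"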
+def handle_license_vars(srctree, lines_before, handled, extravalues, d):
+ licvalues = guess_license(srctree, d)
+ lic_files_chksum = []
+ lic_unknown = []
+ if licvalues:
+ licenses = []
+ for licvalue in licvalues:
+ if not licvalue[0] in licenses:
+ licenses.append(licvalue[0])
+ lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
+ if licvalue[0] == 'Unknown':
+ lic_unknown.append(licvalue[1])
+ lines_before.append('# WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is')
+ lines_before.append('# your responsibility to verify that the values are complete and correct.')
+ if len(licvalues) > 1:
+ lines_before.append('#')
+ lines_before.append('# NOTE: multiple licenses have been detected; if that is correct you should separate')
+ lines_before.append('# these in the LICENSE value using & if the multiple licenses all apply, or | if there')
+ lines_before.append('# is a choice between the multiple licenses. If in doubt, check the accompanying')
+ lines_before.append('# documentation to determine which situation is applicable.')
+ if lic_unknown:
+ lines_before.append('#')
+            lines_before.append('# The following license files could not be identified and are')
+            lines_before.append('# represented as "Unknown" below; you will need to check them yourself:')
+ for licfile in lic_unknown:
+ lines_before.append('# %s' % licfile)
+ lines_before.append('#')
+ else:
+ lines_before.append('# Unable to find any files that looked like license statements. Check the accompanying')
+ lines_before.append('# documentation and source headers and set LICENSE and LIC_FILES_CHKSUM accordingly.')
+ lines_before.append('#')
+ lines_before.append('# NOTE: LICENSE is being set to "CLOSED" to allow you to at least start building - if')
+ lines_before.append('# this is not accurate with respect to the licensing of the software being built (it')
+ lines_before.append('# will not be in most cases) you must specify the correct value before using this')
+ lines_before.append('# recipe for anything other than initial testing/development!')
+ licenses = ['CLOSED']
+ pkg_license = extravalues.pop('LICENSE', None)
+ if pkg_license:
+ if licenses == ['Unknown']:
+ lines_before.append('# NOTE: The following LICENSE value was determined from the original package metadata')
+ licenses = [pkg_license]
+ else:
+ lines_before.append('# NOTE: Original package metadata indicates license is: %s' % pkg_license)
+ lines_before.append('LICENSE = "%s"' % ' '.join(licenses))
+ lines_before.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
+ lines_before.append('')
+ handled.append(('license', licvalues))
+ return licvalues
+
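A minimal sketch of how a caller consumes the new helper (hypothetical srctree; tinfoil assumed initialised as elsewhere in this file):

    lines_before = []
    handled = []
    extravalues = {'LICENSE': 'MIT'}   # e.g. pulled from package metadata earlier
    licvalues = handle_license_vars('/path/to/srctree', lines_before, handled,
                                    extravalues, tinfoil.config_data)
    # If no license files were detected, lines_before now ends with:
    #   # NOTE: Original package metadata indicates license is: MIT
    #   LICENSE = "CLOSED"
    #   LIC_FILES_CHKSUM = ""
    # and ('license', licvalues) has been appended to handled.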
def get_license_md5sums(d, static_only=False):
import bb.utils
md5sums = {}
@@ -751,7 +872,7 @@ def crunch_license(licfile):
# Note: these are carefully constructed!
license_title_re = re.compile('^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
- license_statement_re = re.compile('^This (project|software) is( free software)? released under the .{1,10} [Ll]icen[sc]e:?$')
+ license_statement_re = re.compile('^(This (project|software) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
copyright_re = re.compile('^(#+)? *Copyright .*$')
crunched_md5sums = {}
@@ -781,7 +902,7 @@ def crunch_license(licfile):
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
crunched_md5sums['2ebfb3bb49b9a48a075cc1425e7f4129'] = 'LGPLv3'
lictext = []
- with open(licfile, 'r') as f:
+ with open(licfile, 'r', errors='surrogateescape') as f:
for line in f:
# Drop opening statements
if copyright_re.match(line):
@@ -792,14 +913,14 @@ def crunch_license(licfile):
continue
# Squash spaces, and replace smart quotes, double quotes
# and backticks with single quotes
- line = oe.utils.squashspaces(line.strip()).decode("utf-8")
+ line = oe.utils.squashspaces(line.strip())
line = line.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c","'").replace(u"\u201d", "'").replace('"', '\'').replace('`', '\'')
if line:
lictext.append(line)
m = hashlib.md5()
try:
- m.update(' '.join(lictext))
+ m.update(' '.join(lictext).encode('utf-8'))
md5val = m.hexdigest()
except UnicodeEncodeError:
md5val = None
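The per-line normalisation above can be reproduced standalone; a rough Python 3 equivalent (header/copyright dropping omitted, and oe.utils.squashspaces approximated with a regex):

    import hashlib, re

    def _normalise(line):
        line = re.sub(r'\s+', ' ', line.strip())     # squashspaces stand-in
        for quote in (u'\u2018', u'\u2019', u'\u201c', u'\u201d', '"', '`'):
            line = line.replace(quote, "'")
        return line

    with open('COPYING', errors='surrogateescape') as f:
        text = ' '.join(_normalise(l) for l in f if _normalise(l))
    md5val = hashlib.md5(text.encode('utf-8')).hexdigest()   # note the explicit encode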
@@ -807,9 +928,9 @@ def crunch_license(licfile):
license = crunched_md5sums.get(md5val, None)
return license, md5val, lictext
-def guess_license(srctree):
+def guess_license(srctree, d):
import bb
- md5sums = get_license_md5sums(tinfoil.config_data)
+ md5sums = get_license_md5sums(d)
licenses = []
licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*']
@@ -842,7 +963,7 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
"""
pkglicenses = {pn: []}
for license, licpath, _ in licvalues:
- for pkgname, pkgpath in packages.iteritems():
+ for pkgname, pkgpath in packages.items():
if licpath.startswith(pkgpath + '/'):
if pkgname in pkglicenses:
pkglicenses[pkgname].append(license)
@@ -854,7 +975,7 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
pkglicenses[pn].append(license)
outlicenses = {}
for pkgname in packages:
- license = ' '.join(list(set(pkglicenses.get(pkgname, ['Unknown']))))
+ license = ' '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
if license == 'Unknown' and pkgname in fallback_licenses:
license = fallback_licenses[pkgname]
outlines.append('LICENSE_%s = "%s"' % (pkgname, license))
@@ -869,7 +990,7 @@ def read_pkgconfig_provides(d):
for line in f:
pkgmap[os.path.basename(line.rstrip())] = os.path.splitext(os.path.basename(fn))[0]
recipemap = {}
- for pc, pkg in pkgmap.iteritems():
+ for pc, pkg in pkgmap.items():
pkgdatafile = os.path.join(pkgdatadir, 'runtime', pkg)
if os.path.exists(pkgdatafile):
with open(pkgdatafile, 'r') as f:
@@ -878,73 +999,84 @@ def read_pkgconfig_provides(d):
recipemap[pc] = line.split(':', 1)[1].strip()
return recipemap
-def convert_pkginfo(pkginfofile):
- values = {}
- with open(pkginfofile, 'r') as f:
- indesc = False
- for line in f:
- if indesc:
- if line.strip():
- values['DESCRIPTION'] += ' ' + line.strip()
- else:
- indesc = False
- else:
- splitline = line.split(': ', 1)
- key = line[0]
- value = line[1]
- if key == 'LICENSE':
- for dep in value.split(','):
- dep = dep.split()[0]
- mapped = depmap.get(dep, '')
- if mapped:
- depends.append(mapped)
- elif key == 'License':
- values['LICENSE'] = value
- elif key == 'Summary':
- values['SUMMARY'] = value
- elif key == 'Description':
- values['DESCRIPTION'] = value
- indesc = True
- return values
-
def convert_debian(debpath):
+ value_map = {'Package': 'PN',
+ 'Version': 'PV',
+ 'Section': 'SECTION',
+ 'License': 'LICENSE',
+ 'Homepage': 'HOMEPAGE'}
+
# FIXME extend this mapping - perhaps use distro_alias.inc?
depmap = {'libz-dev': 'zlib'}
values = {}
depends = []
- with open(os.path.join(debpath, 'control')) as f:
+ with open(os.path.join(debpath, 'control'), 'r', errors='surrogateescape') as f:
indesc = False
for line in f:
if indesc:
- if line.strip():
+ if line.startswith(' '):
if line.startswith(' This package contains'):
indesc = False
else:
- values['DESCRIPTION'] += ' ' + line.strip()
+ if 'DESCRIPTION' in values:
+ values['DESCRIPTION'] += ' ' + line.strip()
+ else:
+ values['DESCRIPTION'] = line.strip()
else:
indesc = False
- else:
+ if not indesc:
splitline = line.split(':', 1)
- key = line[0]
- value = line[1]
+ if len(splitline) < 2:
+ continue
+ key = splitline[0]
+ value = splitline[1].strip()
if key == 'Build-Depends':
for dep in value.split(','):
dep = dep.split()[0]
mapped = depmap.get(dep, '')
if mapped:
depends.append(mapped)
- elif key == 'Section':
- values['SECTION'] = value
elif key == 'Description':
values['SUMMARY'] = value
indesc = True
+ else:
+ varname = value_map.get(key, None)
+ if varname:
+ values[varname] = value
- if depends:
- values['DEPENDS'] = ' '.join(depends)
+ #if depends:
+ # values['DEPENDS'] = ' '.join(depends)
return values
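Given a typical control file, the reworked parser yields something like this (illustrative; note the Build-Depends mapping into DEPENDS is currently commented out):

    # control (extracted from a .deb/.ipk):
    #   Package: libfoo
    #   Version: 1.2-3
    #   Section: libs
    #   Homepage: http://example.com/libfoo
    #   Description: Foo library
    #    Longer description text...
    #
    # convert_debian() result:
    #   {'PN': 'libfoo', 'PV': '1.2-3', 'SECTION': 'libs',
    #    'HOMEPAGE': 'http://example.com/libfoo',
    #    'SUMMARY': 'Foo library', 'DESCRIPTION': 'Longer description text...'}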
+def convert_rpm_xml(xmlfile):
+ '''Converts the output from rpm -qp --xml to a set of variable values'''
+ import xml.etree.ElementTree as ElementTree
+ rpmtag_map = {'Name': 'PN',
+ 'Version': 'PV',
+ 'Summary': 'SUMMARY',
+ 'Description': 'DESCRIPTION',
+ 'License': 'LICENSE',
+ 'Url': 'HOMEPAGE'}
+
+ values = {}
+ tree = ElementTree.parse(xmlfile)
+ root = tree.getroot()
+ for child in root:
+ if child.tag == 'rpmTag':
+ name = child.attrib.get('name', None)
+ if name:
+ varname = rpmtag_map.get(name, None)
+ if varname:
+ values[varname] = child[0].text
+ return values
+
+
+def check_npm(d):
+ if not os.path.exists(os.path.join(d.getVar('STAGING_BINDIR_NATIVE', True), 'npm')):
+ logger.error('npm required to process specified source, but npm is not available - you need to build nodejs-native first')
+ sys.exit(14)
def register_commands(subparsers):
parser_create = subparsers.add_parser('create',
@@ -959,5 +1091,7 @@ def register_commands(subparsers):
parser_create.add_argument('-b', '--binary', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure)', action='store_true')
parser_create.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
parser_create.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
+ parser_create.add_argument('-a', '--autorev', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
+ parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
parser_create.set_defaults(func=create_recipe)
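With the two new options registered above, typical invocations look like this (paths and URLs are placeholders):

    # Pin SRCREV to the current HEAD (the new default) and write the recipe out:
    recipetool create -o libfoo_git.bb https://github.com/user/libfoo
    # Opt back in to a floating revision and keep the fetch tempdir for debugging:
    recipetool create -a --keep-temp -o libfoo_git.bb https://github.com/user/libfoo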
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys.py
index f84ec3dc6..e914e53aa 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys.py
@@ -44,7 +44,7 @@ class CmakeRecipeHandler(RecipeHandler):
classes.append('cmake')
values = CmakeRecipeHandler.extract_cmake_deps(lines_before, srctree, extravalues)
classes.extend(values.pop('inherit', '').split())
- for var, value in values.iteritems():
+ for var, value in values.items():
lines_before.append('%s = "%s"' % (var, value))
lines_after.append('# Specify any options you want to pass to cmake using EXTRA_OECMAKE:')
lines_after.append('EXTRA_OECMAKE = ""')
@@ -159,7 +159,7 @@ class CmakeRecipeHandler(RecipeHandler):
def find_cmake_package(pkg):
RecipeHandler.load_devel_filemap(tinfoil.config_data)
- for fn, pn in RecipeHandler.recipecmakefilemap.iteritems():
+ for fn, pn in RecipeHandler.recipecmakefilemap.items():
splitname = fn.split('/')
if len(splitname) > 1:
if splitname[0].lower().startswith(pkg.lower()):
@@ -173,7 +173,7 @@ class CmakeRecipeHandler(RecipeHandler):
def parse_cmake_file(fn, paths=None):
searchpaths = (paths or []) + [os.path.dirname(fn)]
logger.debug('Parsing file %s' % fn)
- with open(fn, 'r') as f:
+ with open(fn, 'r', errors='surrogateescape') as f:
for line in f:
line = line.strip()
for handler in handlers:
@@ -348,13 +348,13 @@ class AutotoolsRecipeHandler(RecipeHandler):
autoconf = True
values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, extravalues)
classes.extend(values.pop('inherit', '').split())
- for var, value in values.iteritems():
+ for var, value in values.items():
lines_before.append('%s = "%s"' % (var, value))
else:
conffile = RecipeHandler.checkfiles(srctree, ['configure'])
if conffile:
# Check if this is just a pre-generated autoconf configure script
- with open(conffile[0], 'r') as f:
+ with open(conffile[0], 'r', errors='surrogateescape') as f:
for i in range(1, 10):
if 'Generated by GNU Autoconf' in f.readline():
autoconf = True
@@ -364,7 +364,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
# Last resort
conffile = RecipeHandler.checkfiles(srctree, ['configure'])
if conffile:
- with open(conffile[0], 'r') as f:
+ with open(conffile[0], 'r', errors='surrogateescape') as f:
for line in f:
line = line.strip()
if line.startswith('VERSION=') or line.startswith('PACKAGE_VERSION='):
@@ -442,11 +442,12 @@ class AutotoolsRecipeHandler(RecipeHandler):
ac_init_re = re.compile('AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
am_init_re = re.compile('AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
define_re = re.compile('\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
+ version_re = re.compile('([0-9.]+)')
defines = {}
def subst_defines(value):
newvalue = value
- for define, defval in defines.iteritems():
+ for define, defval in defines.items():
newvalue = newvalue.replace(define, defval)
if newvalue != value:
return subst_defines(newvalue)
@@ -488,6 +489,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
for handler in handlers:
if handler.process_macro(srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
return
+ logger.debug('Found keyword %s with value "%s"' % (keyword, value))
if keyword == 'PKG_CHECK_MODULES':
res = pkg_re.search(value)
if res:
@@ -569,10 +571,21 @@ class AutotoolsRecipeHandler(RecipeHandler):
deps.append('sqlite3')
elif keyword == 'AX_LIB_TAGLIB':
deps.append('taglib')
- elif keyword == 'AX_PKG_SWIG':
- deps.append('swig')
+ elif keyword in ['AX_PKG_SWIG', 'AC_PROG_SWIG']:
+ deps.append('swig-native')
elif keyword == 'AX_PROG_XSLTPROC':
deps.append('libxslt-native')
+ elif keyword in ['AC_PYTHON_DEVEL', 'AX_PYTHON_DEVEL', 'AM_PATH_PYTHON']:
+ pythonclass = 'pythonnative'
+ res = version_re.search(value)
+ if res:
+ if res.group(1).startswith('3'):
+ pythonclass = 'python3native'
+ # Avoid replacing python3native with pythonnative
+ if not pythonclass in inherits and not 'python3native' in inherits:
+ if 'pythonnative' in inherits:
+ inherits.remove('pythonnative')
+ inherits.append(pythonclass)
elif keyword == 'AX_WITH_CURSES':
deps.append('ncurses')
elif keyword == 'AX_PATH_BDB':
@@ -638,7 +651,11 @@ class AutotoolsRecipeHandler(RecipeHandler):
'AX_LIB_SQLITE3',
'AX_LIB_TAGLIB',
'AX_PKG_SWIG',
+ 'AC_PROG_SWIG',
'AX_PROG_XSLTPROC',
+ 'AC_PYTHON_DEVEL',
+ 'AX_PYTHON_DEVEL',
+ 'AM_PATH_PYTHON',
'AX_WITH_CURSES',
'AX_PATH_BDB',
'AX_PATH_LIB_PCRE',
@@ -654,7 +671,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
nesting = 0
in_keyword = ''
partial = ''
- with open(srcfile, 'r') as f:
+ with open(srcfile, 'r', errors='surrogateescape') as f:
for line in f:
if in_keyword:
partial += ' ' + line.strip()
@@ -682,7 +699,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
process_macro(in_keyword, partial)
if extravalues:
- for k,v in extravalues.items():
+ for k,v in list(extravalues.items()):
if v:
if v.startswith('$') or v.startswith('@') or v.startswith('%'):
del extravalues[k]
@@ -737,7 +754,7 @@ class MakefileRecipeHandler(RecipeHandler):
if 'buildsystem' in handled:
return False
- makefile = RecipeHandler.checkfiles(srctree, ['Makefile'])
+ makefile = RecipeHandler.checkfiles(srctree, ['Makefile', 'makefile', 'GNUmakefile'])
if makefile:
lines_after.append('# NOTE: this is a Makefile-only piece of software, so we cannot generate much of the')
lines_after.append('# recipe automatically - you will need to examine the Makefile yourself and ensure')
@@ -753,7 +770,7 @@ class MakefileRecipeHandler(RecipeHandler):
if scanfile and os.path.exists(scanfile):
values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, acfile=scanfile)
classes.extend(values.pop('inherit', '').split())
- for var, value in values.iteritems():
+ for var, value in values.items():
if var == 'DEPENDS':
lines_before.append('# NOTE: some of these dependencies may be optional, check the Makefile and/or upstream documentation')
lines_before.append('%s = "%s"' % (var, value))
@@ -780,7 +797,7 @@ class MakefileRecipeHandler(RecipeHandler):
if installtarget:
func.append('# This is a guess; additional arguments may be required')
makeargs = ''
- with open(makefile[0], 'r') as f:
+ with open(makefile[0], 'r', errors='surrogateescape') as f:
for i in range(1, 100):
if 'DESTDIR' in f.readline():
makeargs += " 'DESTDIR=${D}'"
@@ -809,7 +826,7 @@ class VersionFileRecipeHandler(RecipeHandler):
version = None
for fileitem in filelist:
linecount = 0
- with open(fileitem, 'r') as f:
+ with open(fileitem, 'r', errors='surrogateescape') as f:
for line in f:
line = line.rstrip().strip('"\'')
linecount += 1
@@ -830,22 +847,35 @@ class SpecFileRecipeHandler(RecipeHandler):
if 'PV' in extravalues and 'PN' in extravalues:
return
filelist = RecipeHandler.checkfiles(srctree, ['*.spec'], recursive=True)
- pn = None
- pv = None
+ valuemap = {'Name': 'PN',
+ 'Version': 'PV',
+ 'Summary': 'SUMMARY',
+ 'Url': 'HOMEPAGE',
+ 'License': 'LICENSE'}
+ foundvalues = {}
for fileitem in filelist:
linecount = 0
- with open(fileitem, 'r') as f:
+ with open(fileitem, 'r', errors='surrogateescape') as f:
for line in f:
- if line.startswith('Name:') and not pn:
- pn = line.split(':')[1].strip()
- if line.startswith('Version:') and not pv:
- pv = line.split(':')[1].strip()
- if pv or pn:
- if pv and not 'PV' in extravalues and validate_pv(pv):
- extravalues['PV'] = pv
- if pn and not 'PN' in extravalues:
- extravalues['PN'] = pn
- break
+ for value, varname in valuemap.items():
+ if line.startswith(value + ':') and not varname in foundvalues:
+ foundvalues[varname] = line.split(':', 1)[1].strip()
+ break
+ if len(foundvalues) == len(valuemap):
+ break
+ if 'PV' in foundvalues:
+ if not validate_pv(foundvalues['PV']):
+ del foundvalues['PV']
+ license = foundvalues.pop('LICENSE', None)
+ if license:
+ liccomment = '# NOTE: spec file indicates the license may be "%s"' % license
+ for i, line in enumerate(lines_before):
+ if line.startswith('LICENSE ='):
+ lines_before.insert(i, liccomment)
+ break
+ else:
+ lines_before.append(liccomment)
+ extravalues.update(foundvalues)
def register_recipe_handlers(handlers):
# Set priorities with some gaps so that other plugins can insert
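An example of what the reworked spec-file scan now extracts (hypothetical foo.spec; assumes '2.1' passes validate_pv):

    # foo.spec excerpt:
    #   Name: foo
    #   Version: 2.1
    #   Summary: Example tool
    #   Url: http://example.com/foo
    #   License: MIT
    #
    # Resulting updates:
    #   extravalues = {'PN': 'foo', 'PV': '2.1', 'SUMMARY': 'Example tool',
    #                  'HOMEPAGE': 'http://example.com/foo'}
    #   plus '# NOTE: spec file indicates the license may be "MIT"' inserted
    #   above the LICENSE = line in lines_before (or appended if none exists).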
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py
index c3823307a..e41d81a31 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py
@@ -61,8 +61,6 @@ class PythonRecipeHandler(RecipeHandler):
}
# PN/PV are already set by recipetool core & desc can be extremely long
excluded_fields = [
- 'Name',
- 'Version',
'Description',
]
setup_parse_map = {
@@ -88,8 +86,11 @@ class PythonRecipeHandler(RecipeHandler):
]
setuparg_multi_line_values = ['Description']
replacements = [
+ ('License', r' +$', ''),
+ ('License', r'^ +', ''),
('License', r' ', '-'),
- ('License', r'-License$', ''),
+ ('License', r'^GNU-', ''),
+ ('License', r'-[Ll]icen[cs]e(,?-[Vv]ersion)?', ''),
('License', r'^UNKNOWN$', ''),
# Remove currently unhandled version numbers from these variables
@@ -218,6 +219,9 @@ class PythonRecipeHandler(RecipeHandler):
else:
info = self.get_setup_args_info(setupscript)
+ # Grab the license value before applying replacements
+ license_str = info.get('License', '').strip()
+
self.apply_info_replacements(info)
if uses_setuptools:
@@ -225,63 +229,53 @@ class PythonRecipeHandler(RecipeHandler):
else:
classes.append('distutils')
+ if license_str:
+ for i, line in enumerate(lines_before):
+ if line.startswith('LICENSE = '):
+ lines_before.insert(i, '# NOTE: License in setup.py/PKGINFO is: %s' % license_str)
+ break
+
if 'Classifier' in info:
+ existing_licenses = info.get('License', '')
licenses = []
for classifier in info['Classifier']:
if classifier in self.classifier_license_map:
license = self.classifier_license_map[classifier]
+ if license == 'Apache' and 'Apache-2.0' in existing_licenses:
+ license = 'Apache-2.0'
+ elif license == 'GPL':
+ if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
+ license = 'GPL-2.0'
+ elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
+ license = 'GPL-3.0'
+ elif license == 'LGPL':
+ if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
+ license = 'LGPL-2.1'
+ elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
+ license = 'LGPL-2.0'
+ elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
+ license = 'LGPL-3.0'
licenses.append(license)
if licenses:
info['License'] = ' & '.join(licenses)
-
# Map PKG-INFO & setup.py fields to bitbake variables
- bbinfo = {}
- for field, values in info.iteritems():
+ for field, values in info.items():
if field in self.excluded_fields:
continue
if field not in self.bbvar_map:
continue
- if isinstance(values, basestring):
+ if isinstance(values, str):
value = values
else:
value = ' '.join(str(v) for v in values if v)
bbvar = self.bbvar_map[field]
- if bbvar not in bbinfo and value:
- bbinfo[bbvar] = value
-
- comment_lic_line = None
- for pos, line in enumerate(list(lines_before)):
- if line.startswith('#') and 'LICENSE' in line:
- comment_lic_line = pos
- elif line.startswith('LICENSE =') and 'LICENSE' in bbinfo:
- if line in ('LICENSE = "Unknown"', 'LICENSE = "CLOSED"'):
- lines_before[pos] = 'LICENSE = "{}"'.format(bbinfo['LICENSE'])
- if line == 'LICENSE = "CLOSED"' and comment_lic_line:
- lines_before[comment_lic_line:pos] = [
- '# WARNING: the following LICENSE value is a best guess - it is your',
- '# responsibility to verify that the value is complete and correct.'
- ]
- del bbinfo['LICENSE']
-
- src_uri_line = None
- for pos, line in enumerate(lines_before):
- if line.startswith('SRC_URI ='):
- src_uri_line = pos
-
- if bbinfo:
- mdinfo = ['']
- for k in sorted(bbinfo):
- v = bbinfo[k]
- mdinfo.append('{} = "{}"'.format(k, v))
- if src_uri_line:
- lines_before[src_uri_line-1:src_uri_line-1] = mdinfo
- else:
- lines_before.extend(mdinfo)
+ if bbvar not in extravalues and value:
+ extravalues[bbvar] = value
mapped_deps, unmapped_deps = self.scan_setup_python_deps(srctree, setup_info, setup_non_literals)
@@ -294,8 +288,8 @@ class PythonRecipeHandler(RecipeHandler):
lines_after.append('# The upstream names may not correspond exactly to bitbake package names.')
lines_after.append('#')
lines_after.append('# Uncomment this line to enable all the optional features.')
- lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req.iterkeys())))
- for feature, feature_reqs in extras_req.iteritems():
+ lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req)))
+ for feature, feature_reqs in extras_req.items():
unmapped_deps.difference_update(feature_reqs)
feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
@@ -361,7 +355,7 @@ class PythonRecipeHandler(RecipeHandler):
# Naive mapping of setup() arguments to PKG-INFO field names
for d in [info, non_literals]:
- for key, value in d.items():
+ for key, value in list(d.items()):
new_key = _map(key)
if new_key != key:
del d[key]
@@ -436,14 +430,14 @@ class PythonRecipeHandler(RecipeHandler):
return value
value = info[variable]
- if isinstance(value, basestring):
+ if isinstance(value, str):
new_value = replace_value(search, replace, value)
if new_value is None:
del info[variable]
elif new_value != value:
info[variable] = new_value
- elif hasattr(value, 'iteritems'):
- for dkey, dvalue in value.iteritems():
+ elif hasattr(value, 'items'):
+ for dkey, dvalue in list(value.items()):
new_list = []
for pos, a_value in enumerate(dvalue):
new_value = replace_value(search, replace, a_value)
@@ -504,8 +498,10 @@ class PythonRecipeHandler(RecipeHandler):
for dep in scanned_deps:
mapped = provided_packages.get(dep)
if mapped:
+ logger.debug('Mapped %s to %s' % (dep, mapped))
mapped_deps.add(mapped)
else:
+ logger.debug('Could not map %s' % dep)
unmapped_deps.add(dep)
return mapped_deps, unmapped_deps
@@ -516,7 +512,7 @@ class PythonRecipeHandler(RecipeHandler):
except (OSError, subprocess.CalledProcessError):
pass
else:
- for line in dep_output.splitlines():
+ for line in dep_output.decode('utf-8').splitlines():
line = line.rstrip()
dep, filename = line.split('\t', 1)
if filename.endswith('/setup.py'):
@@ -558,7 +554,7 @@ class PythonRecipeHandler(RecipeHandler):
else:
continue
- for fn in files_info.iterkeys():
+ for fn in files_info:
for suffix in suffixes:
if fn.endswith(suffix):
break
@@ -566,6 +562,8 @@ class PythonRecipeHandler(RecipeHandler):
continue
if fn.startswith(dynload_dir + os.sep):
+ if '/.debug/' in fn:
+ continue
base = os.path.basename(fn)
provided = base.split('.', 1)[0]
packages[provided] = os.path.basename(pkgdatafile)
@@ -608,7 +606,7 @@ def gather_setup_info(fileobj):
visitor.visit(parsed)
non_literals, extensions = {}, []
- for key, value in visitor.keywords.items():
+ for key, value in list(visitor.keywords.items()):
if key == 'ext_modules':
if isinstance(value, list):
for ext in value:
@@ -640,7 +638,7 @@ class SetupScriptVisitor(ast.NodeVisitor):
def visit_setup(self, node):
call = LiteralAstTransform().visit(node)
self.keywords = call.keywords
- for k, v in self.keywords.iteritems():
+ for k, v in self.keywords.items():
if has_non_literals(v):
self.non_literals.append(k)
@@ -706,10 +704,10 @@ class LiteralAstTransform(ast.NodeTransformer):
def has_non_literals(value):
if isinstance(value, ast.AST):
return True
- elif isinstance(value, basestring):
+ elif isinstance(value, str):
return False
- elif hasattr(value, 'itervalues'):
- return any(has_non_literals(v) for v in value.itervalues())
+ elif hasattr(value, 'values'):
+ return any(has_non_literals(v) for v in value.values())
elif hasattr(value, '__iter__'):
return any(has_non_literals(v) for v in value)
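The AST machinery in this handler boils down to evaluating literal setup() keywords without executing setup.py; a reduced standalone sketch of the same idea (not the handler's actual entry point):

    import ast

    def literal_setup_kwargs(path):
        '''Return the literal keyword arguments of the first setup() call.'''
        tree = ast.parse(open(path).read())
        for node in ast.walk(tree):
            if isinstance(node, ast.Call) and getattr(node.func, 'id', '') == 'setup':
                kwargs = {}
                for kw in node.keywords:
                    try:
                        kwargs[kw.arg] = ast.literal_eval(kw.value)  # literals only
                    except ValueError:
                        pass  # non-literal values are tracked separately upstream
                return kwargs
        return {}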
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py
index c6e86bd2b..7dac59fd0 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py
@@ -59,7 +59,7 @@ class KernelRecipeHandler(RecipeHandler):
kpatchlevel = -1
ksublevel = -1
kextraversion = ''
- with open(makefile, 'r') as f:
+ with open(makefile, 'r', errors='surrogateescape') as f:
for i, line in enumerate(f):
if i > 10:
break
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_kmod.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_kmod.py
index fe39edb28..7cf188db2 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_kmod.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_kmod.py
@@ -53,7 +53,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
break
else:
continue
- with open(cfile, 'r') as f:
+ with open(cfile, 'r', errors='surrogateescape') as f:
for line in f:
if module_inc_re.match(line.strip()):
is_module = True
@@ -73,7 +73,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
in_install = False
in_compile = False
install_target = None
- with open(makefile, 'r') as f:
+ with open(makefile, 'r', errors='surrogateescape') as f:
for line in f:
if line.startswith('install:'):
if not install_lines:
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py
index b3ffcdbc5..7bb844cb0 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py
@@ -21,7 +21,7 @@ import subprocess
import tempfile
import shutil
import json
-from recipetool.create import RecipeHandler, split_pkg_licenses
+from recipetool.create import RecipeHandler, split_pkg_licenses, handle_license_vars, check_npm
logger = logging.getLogger('recipetool')
@@ -45,7 +45,7 @@ class NpmRecipeHandler(RecipeHandler):
license = data['license']
if isinstance(license, dict):
license = license.get('type', None)
- return None
+ return license
def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before):
try:
@@ -83,6 +83,66 @@ class NpmRecipeHandler(RecipeHandler):
extravalues['extrafiles']['lockdown.json'] = tmpfile
lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
+ def _handle_dependencies(self, d, deps, lines_before, srctree):
+ import scriptutils
+ # If this isn't a single module we need to get the dependencies
+ # and add them to SRC_URI
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'SRC_URI':
+ if not origvalue.startswith('npm://'):
+ src_uri = origvalue.split()
+ changed = False
+ for dep, depdata in deps.items():
+ version = self.get_node_version(dep, depdata, d)
+ if version:
+ url = 'npm://registry.npmjs.org;name=%s;version=%s;subdir=node_modules/%s' % (dep, version, dep)
+ scriptutils.fetch_uri(d, url, srctree)
+ src_uri.append(url)
+ changed = True
+ if changed:
+ return src_uri, None, -1, True
+ return origvalue, None, 0, True
+ updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
+ if updated:
+ del lines_before[:]
+ for line in newlines:
+ # Hack to avoid newlines that edit_metadata inserts
+ if line.endswith('\n'):
+ line = line[:-1]
+ lines_before.append(line)
+ return updated
+
+ def _replace_license_vars(self, srctree, lines_before, handled, extravalues, d):
+ for item in handled:
+ if isinstance(item, tuple):
+ if item[0] == 'license':
+                    handled.remove(item)  # 'del item' would only unbind the loop variable
+ break
+
+ calledvars = []
+ def varfunc(varname, origvalue, op, newlines):
+ if varname in ['LICENSE', 'LIC_FILES_CHKSUM']:
+ for i, e in enumerate(reversed(newlines)):
+ if not e.startswith('#'):
+ stop = i
+ while stop > 0:
+ newlines.pop()
+ stop -= 1
+ break
+ calledvars.append(varname)
+ if len(calledvars) > 1:
+ # The second time around, put the new license text in
+ insertpos = len(newlines)
+ handle_license_vars(srctree, newlines, handled, extravalues, d)
+ return None, None, 0, True
+ return origvalue, None, 0, True
+ updated, newlines = bb.utils.edit_metadata(lines_before, ['LICENSE', 'LIC_FILES_CHKSUM'], varfunc)
+ if updated:
+ del lines_before[:]
+ lines_before.extend(newlines)
+ else:
+ raise Exception('Did not find license variables')
+
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
import bb.utils
import oe
@@ -92,11 +152,13 @@ class NpmRecipeHandler(RecipeHandler):
return False
def read_package_json(fn):
- with open(fn, 'r') as f:
+ with open(fn, 'r', errors='surrogateescape') as f:
return json.loads(f.read())
files = RecipeHandler.checkfiles(srctree, ['package.json'])
if files:
+ check_npm(tinfoil.config_data)
+
data = read_package_json(files[0])
if 'name' in data and 'version' in data:
extravalues['PN'] = data['name']
@@ -104,9 +166,15 @@ class NpmRecipeHandler(RecipeHandler):
classes.append('npm')
handled.append('buildsystem')
if 'description' in data:
- lines_before.append('SUMMARY = "%s"' % data['description'])
+ extravalues['SUMMARY'] = data['description']
if 'homepage' in data:
- lines_before.append('HOMEPAGE = "%s"' % data['homepage'])
+ extravalues['HOMEPAGE'] = data['homepage']
+
+ deps = data.get('dependencies', {})
+ updated = self._handle_dependencies(tinfoil.config_data, deps, lines_before, srctree)
+ if updated:
+ # We need to redo the license stuff
+ self._replace_license_vars(srctree, lines_before, handled, extravalues, tinfoil.config_data)
# Shrinkwrap
localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
@@ -128,7 +196,7 @@ class NpmRecipeHandler(RecipeHandler):
license = self._handle_license(data)
if license:
licenses['${PN}'] = license
- for pkgname, pkgitem in npmpackages.iteritems():
+ for pkgname, pkgitem in npmpackages.items():
_, pdata = pkgitem
license = self._handle_license(pdata)
if license:
@@ -136,7 +204,7 @@ class NpmRecipeHandler(RecipeHandler):
# Now write out the package-specific license values
# We need to strip out the json data dicts for this since split_pkg_licenses
# isn't expecting it
- packages = OrderedDict((x,y[0]) for x,y in npmpackages.iteritems())
+ packages = OrderedDict((x,y[0]) for x,y in npmpackages.items())
packages['${PN}'] = ''
pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
all_licenses = list(set([item for pkglicense in pkglicenses.values() for item in pkglicense]))
@@ -148,9 +216,52 @@ class NpmRecipeHandler(RecipeHandler):
lines_before[i] = 'LICENSE = "%s"' % ' & '.join(all_licenses)
break
+ # Need to move S setting after inherit npm
+ for i, line in enumerate(lines_before):
+ if line.startswith('S ='):
+ lines_before.pop(i)
+ lines_after.insert(0, '# Must be set after inherit npm since that itself sets S')
+ lines_after.insert(1, line)
+ break
+
return True
return False
+ # FIXME this is duplicated from lib/bb/fetch2/npm.py
+ def _parse_view(self, output):
+ '''
+ Parse the output of npm view --json; the last JSON result
+ is assumed to be the one that we're interested in.
+ '''
+ pdata = None
+ outdeps = {}
+ datalines = []
+ bracelevel = 0
+ for line in output.splitlines():
+ if bracelevel:
+ datalines.append(line)
+ elif '{' in line:
+ datalines = []
+ datalines.append(line)
+ bracelevel = bracelevel + line.count('{') - line.count('}')
+ if datalines:
+ pdata = json.loads('\n'.join(datalines))
+ return pdata
+
+ # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+ # (split out from _getdependencies())
+ def get_node_version(self, pkg, version, d):
+ import bb.fetch2
+ pkgfullname = pkg
+ if version != '*' and not '/' in version:
+ pkgfullname += "@'%s'" % version
+        logger.debug("Calling getdeps on %s" % pkg)
+ runenv = dict(os.environ, PATH=d.getVar('PATH', True))
+ fetchcmd = "npm view %s --json" % pkgfullname
+ output, _ = bb.process.run(fetchcmd, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ data = self._parse_view(output)
+ return data.get('version', None)
+
def register_recipe_handlers(handlers):
handlers.append((NpmRecipeHandler(), 60))
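_parse_view() keeps only the last JSON document when npm prints several; a quick illustration with made-up npm view output:

    output = '{ "name": "foo", "version": "0.0.9" }\n{ "name": "foo", "version": "1.1.3" }'
    # The brace counting resets datalines at each new top-level '{',
    # so the final document wins:
    #   NpmRecipeHandler()._parse_view(output) -> {'name': 'foo', 'version': '1.1.3'}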
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py b/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py
index bdf0693ec..fbdd7bcef 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py
@@ -39,16 +39,6 @@ def tinfoil_init(instance):
tinfoil = instance
-def _provide_to_pn(cooker, provide):
- """Get the name of the preferred recipe for the specified provide."""
- import bb.providers
- filenames = cooker.recipecache.providers[provide]
- eligible, foundUnique = bb.providers.filterProviders(filenames, provide, cooker.expanded_data, cooker.recipecache)
- filename = eligible[0]
- pn = cooker.recipecache.pkg_fn[filename]
- return pn
-
-
def _get_recipe_file(cooker, pn):
import oe.recipeutils
recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
@@ -70,8 +60,7 @@ def layer(layerpath):
def newappend(args):
import oe.recipeutils
- pn = _provide_to_pn(tinfoil.cooker, args.target)
- recipe_path = _get_recipe_file(tinfoil.cooker, pn)
+ recipe_path = _get_recipe_file(tinfoil.cooker, args.target)
rd = tinfoil.config_data.createCopy()
rd.setVar('FILE', recipe_path)
@@ -81,7 +70,7 @@ def newappend(args):
return 1
if not path_ok:
- logger.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath))
+ logger.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
if not os.path.abspath(args.destlayer) in layerdirs:
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py b/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py
index 657d2b6a7..85701c06a 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py
@@ -51,7 +51,7 @@ def setvar(args):
if args.recipe_only:
patches = [oe.recipeutils.patch_recipe_file(args.recipefile, varvalues, patch=args.patch)]
else:
- rd = oe.recipeutils.parse_recipe(args.recipefile, None, tinfoil.config_data)
+ rd = oe.recipeutils.parse_recipe(tinfoil.cooker, args.recipefile, None)
if not rd:
return 1
patches = oe.recipeutils.patch_recipe(rd, args.recipefile, varvalues, patch=args.patch)
diff --git a/import-layers/yocto-poky/scripts/lib/scriptutils.py b/import-layers/yocto-poky/scripts/lib/scriptutils.py
index aef19d3d7..5ccc02796 100644
--- a/import-layers/yocto-poky/scripts/lib/scriptutils.py
+++ b/import-layers/yocto-poky/scripts/lib/scriptutils.py
@@ -103,7 +103,7 @@ def fetch_uri(d, uri, destdir, srcrev=None):
return ret
def run_editor(fn):
- if isinstance(fn, basestring):
+ if isinstance(fn, str):
params = '"%s"' % fn
else:
params = ''
@@ -116,3 +116,16 @@ def run_editor(fn):
except OSError as exc:
logger.error("Execution of editor '%s' failed: %s", editor, exc)
return 1
+
+def is_src_url(param):
+ """
+ Check if a parameter is a URL and return True if so
+ NOTE: be careful about changing this as it will influence how devtool/recipetool command line handling works
+ """
+ if not param:
+ return False
+ elif '://' in param:
+ return True
+ elif param.startswith('git@') or ('@' in param and param.endswith('.git')):
+ return True
+ return False
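Expected classifications for the new helper, following directly from the conditions above:

    assert is_src_url('git://git.example.com/repo')           # contains '://'
    assert is_src_url('git@git.example.com:repo.git')         # scp-style git URL
    assert is_src_url('user@host.example.com:path/repo.git')  # '@' plus '.git' suffix
    assert not is_src_url('/home/user/src/project')           # plain local path
    assert not is_src_url('')                                 # empty -> False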
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks
index ea01cf375..8d7d8de6e 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks
@@ -6,5 +6,5 @@
part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
-bootloader --ptable gpt --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0"
+bootloader --ptable gpt --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks
index 8a81f8f51..f61d941d6 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks
@@ -19,5 +19,5 @@ part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 10
part / --source rootfs --rootfs-dir=rootfs1 --ondisk sda --fstype=ext4 --label platform --align 1024
part /rescue --source rootfs --rootfs-dir=rootfs2 --ondisk sda --fstype=ext4 --label secondary --align 1024
-bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0"
+bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk.wks
index 6db74a78b..8c8e06b02 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk.wks
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk.wks
@@ -4,5 +4,5 @@
include common.wks.inc
-bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0"
+bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkefidisk.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkefidisk.wks
index 696e94e3d..9f534fe18 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkefidisk.wks
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkefidisk.wks
@@ -4,8 +4,8 @@
part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
-part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
part swap --ondisk sda --size 44 --label swap1 --fstype=swap
-bootloader --timeout=10 --append="rootwait rootfstype=ext4 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0"
+bootloader --ptable gpt --timeout=5 --append="rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks
index 66a22f60b..f3ae09099 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks
@@ -8,4 +8,4 @@ part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
part swap --ondisk sda --size 44 --label swap1 --fstype=swap
-bootloader --timeout=10 --append="rootwait rootfstype=ext4 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0"
+bootloader --ptable gpt --timeout=5 --append="rootwait rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks
new file mode 100644
index 000000000..b90002356
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks
@@ -0,0 +1,11 @@
+# short-description: Create an EFI disk image with systemd-boot
+# long-description: Creates a partitioned EFI disk image that the user
+# can directly dd to boot media. The selected bootloader is systemd-boot.
+
+part /boot --source bootimg-efi --sourceparams="loader=systemd-boot" --ondisk sda --label msdos --active --align 1024
+
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
+
+part swap --ondisk sda --size 44 --label swap1 --fstype=swap
+
+bootloader --ptable gpt --timeout=5 --append="rootwait rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/conf.py b/import-layers/yocto-poky/scripts/lib/wic/conf.py
index f7d56d046..070ec3096 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/conf.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/conf.py
@@ -70,7 +70,7 @@ class ConfigMgr(object):
self.create = {}
# initialize the values with defaults
- for sec, vals in self.DEFAULTS.iteritems():
+ for sec, vals in self.DEFAULTS.items():
setattr(self, sec, vals)
def __set_ksconf(self, ksconf):
diff --git a/import-layers/yocto-poky/scripts/lib/wic/creator.py b/import-layers/yocto-poky/scripts/lib/wic/creator.py
index 523129728..8f7d1503f 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/creator.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/creator.py
@@ -24,7 +24,7 @@ from wic.conf import configmgr
from wic.plugin import pluginmgr
-class Creator(object):
+class Creator():
"""${name}: create an image
Usage:
@@ -41,7 +41,7 @@ class Creator(object):
# get cmds from pluginmgr
# mix-in do_subcmd interface
- for subcmd, klass in pluginmgr.get_plugins('imager').iteritems():
+ for subcmd, klass in pluginmgr.get_plugins('imager').items():
if not hasattr(klass, 'do_create'):
msger.warning("Unsupported subcmd: %s" % subcmd)
continue
@@ -69,6 +69,7 @@ class Creator(object):
optparser.add_option('', '--tmpfs', action='store_true', dest='enabletmpfs',
help='Setup tmpdir as tmpfs to accelerate, experimental'
' feature, use it if you have more than 4G memory')
+ optparser.add_option('', '--bmap', action='store_true', help='generate .bmap')
return optparser
def postoptparse(self, options):
diff --git a/import-layers/yocto-poky/scripts/lib/wic/engine.py b/import-layers/yocto-poky/scripts/lib/wic/engine.py
index 76b93e82f..5b104631c 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/engine.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/engine.py
@@ -44,7 +44,7 @@ def verify_build_env():
Returns True if it is, false otherwise
"""
if not os.environ.get("BUILDDIR"):
- print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
+ print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
sys.exit(1)
return True
@@ -107,7 +107,7 @@ def list_canned_images(scripts_path):
desc = line[idx + len("short-description:"):].strip()
break
basename = os.path.splitext(fname)[0]
- print " %s\t\t%s" % (basename.ljust(30), desc)
+ print(" %s\t\t%s" % (basename.ljust(30), desc))
def list_canned_image_help(scripts_path, fullpath):
@@ -120,15 +120,15 @@ def list_canned_image_help(scripts_path, fullpath):
if not found:
idx = line.find("long-description:")
if idx != -1:
- print
- print line[idx + len("long-description:"):].strip()
+ print()
+ print(line[idx + len("long-description:"):].strip())
found = True
continue
if not line.strip():
break
idx = line.find("#")
if idx != -1:
- print line[idx + len("#:"):].rstrip()
+ print(line[idx + len("#:"):].rstrip())
else:
break
@@ -140,12 +140,12 @@ def list_source_plugins():
plugins = pluginmgr.get_source_plugins()
for plugin in plugins:
- print " %s" % plugin
+ print(" %s" % plugin)
def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
native_sysroot, scripts_path, image_output_dir,
- compressor, debug):
+ compressor, bmap, debug):
"""Create image
wks_file - user-defined OE kickstart file
@@ -156,6 +156,7 @@ def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
scripts_path - absolute path to /scripts dir
image_output_dir - dirname to create for image
compressor - compressor utility to compress the image
+ bmap - enable generation of .bmap
Normally, the values for the build artifacts values are determined
by 'wic -e' from the output of the 'bitbake -e' command given an
@@ -178,7 +179,7 @@ def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
try:
oe_builddir = os.environ["BUILDDIR"]
except KeyError:
- print "BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)"
+ print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
sys.exit(1)
if debug:
@@ -186,10 +187,14 @@ def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
crobj = creator.Creator()
- crobj.main(["direct", native_sysroot, kernel_dir, bootimg_dir, rootfs_dir,
- wks_file, image_output_dir, oe_builddir, compressor or ""])
+ cmdline = ["direct", native_sysroot, kernel_dir, bootimg_dir, rootfs_dir,
+ wks_file, image_output_dir, oe_builddir, compressor or ""]
+ if bmap:
+ cmdline.append('--bmap')
- print "\nThe image(s) were created using OE kickstart file:\n %s" % wks_file
+ crobj.main(cmdline)
+
+ print("\nThe image(s) were created using OE kickstart file:\n %s" % wks_file)
def wic_list(args, scripts_path):
@@ -209,10 +214,10 @@ def wic_list(args, scripts_path):
wks_file = args[0]
fullpath = find_canned_image(scripts_path, wks_file)
if not fullpath:
- print "No image named %s found, exiting. "\
+ print("No image named %s found, exiting. "\
"(Use 'wic list images' to list available images, or "\
"specify a fully-qualified OE kickstart (.wks) "\
- "filename)\n" % wks_file
+ "filename)\n" % wks_file)
sys.exit(1)
list_canned_image_help(scripts_path, fullpath)
return True
diff --git a/import-layers/yocto-poky/scripts/lib/wic/filemap.py b/import-layers/yocto-poky/scripts/lib/wic/filemap.py
new file mode 100644
index 000000000..f3240ba8d
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/wic/filemap.py
@@ -0,0 +1,561 @@
+# Copyright (c) 2012 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+"""
+This module implements a way to get a file's block map. Two methods
+are supported - the FIEMAP ioctl and the 'SEEK_HOLE / SEEK_DATA' features of
+the file seek syscall. The former is implemented by the 'FilemapFiemap' class,
+the latter is implemented by the 'FilemapSeek' class. Both classes provide the
+same API. The 'filemap' function automatically selects which class can be used
+and returns an instance of the class.
+"""
+
+# Disable the following pylint recommendations:
+# * Too many instance attributes (R0902)
+# pylint: disable=R0902
+
+import os
+import struct
+import array
+import fcntl
+import tempfile
+import logging
+
+def get_block_size(file_obj):
+ """
+ Returns block size for file object 'file_obj'. Errors are indicated by the
+ 'IOError' exception.
+ """
+
+ from fcntl import ioctl
+ import struct
+
+ # Get the block size of the host file-system for the image file by calling
+ # the FIGETBSZ ioctl (number 2).
+ binary_data = ioctl(file_obj, 2, struct.pack('I', 0))
+ return struct.unpack('I', binary_data)[0]
+
+class ErrorNotSupp(Exception):
+ """
+ An exception of this type is raised when the 'FIEMAP' or 'SEEK_HOLE' feature
+ is not supported either by the kernel or the file-system.
+ """
+ pass
+
+class Error(Exception):
+ """A class for all the other exceptions raised by this module."""
+ pass
+
+
+class _FilemapBase(object):
+ """
+ This is a base class for a couple of other classes in this module. This
+ class simply performs the common parts of the initialization process: opens
+ the image file, gets its size, etc. The 'log' parameter is the logger object
+ to use for printing messages.
+ """
+
+ def __init__(self, image, log=None):
+ """
+ Initialize a class instance. The 'image' argument is the full path to
+ the file, or a file object, to operate on.
+ """
+
+ self._log = log
+ if self._log is None:
+ self._log = logging.getLogger(__name__)
+
+ self._f_image_needs_close = False
+
+ if hasattr(image, "fileno"):
+ self._f_image = image
+ self._image_path = image.name
+ else:
+ self._image_path = image
+ self._open_image_file()
+
+ try:
+ self.image_size = os.fstat(self._f_image.fileno()).st_size
+ except IOError as err:
+ raise Error("cannot get information about file '%s': %s"
+ % (self._f_image.name, err))
+
+ try:
+ self.block_size = get_block_size(self._f_image)
+ except IOError as err:
+ raise Error("cannot get block size for '%s': %s"
+ % (self._image_path, err))
+
+ self.blocks_cnt = self.image_size + self.block_size - 1
+ self.blocks_cnt //= self.block_size
+
+ try:
+ self._f_image.flush()
+ except IOError as err:
+ raise Error("cannot flush image file '%s': %s"
+ % (self._image_path, err))
+
+ try:
+ os.fsync(self._f_image.fileno())
+ except OSError as err:
+ raise Error("cannot synchronize image file '%s': %s "
+ % (self._image_path, err.strerror))
+
+ self._log.debug("opened image \"%s\"" % self._image_path)
+ self._log.debug("block size %d, blocks count %d, image size %d"
+ % (self.block_size, self.blocks_cnt, self.image_size))
+
+ def __del__(self):
+ """The class destructor which just closes the image file."""
+ if self._f_image_needs_close:
+ self._f_image.close()
+
+ def _open_image_file(self):
+ """Open the image file."""
+ try:
+ self._f_image = open(self._image_path, 'rb')
+ except IOError as err:
+ raise Error("cannot open image file '%s': %s"
+ % (self._image_path, err))
+
+ self._f_image_needs_close = True
+
+ def block_is_mapped(self, block): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. It returns
+ 'True' if block number 'block' of the image file is mapped and 'False'
+ otherwise.
+ """
+
+ raise Error("the method is not implemented")
+
+ def block_is_unmapped(self, block): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. It returns
+ 'True' if block number 'block' of the image file is not mapped (hole)
+ and 'False' otherwise.
+ """
+
+ raise Error("the method is not implemented")
+
+ def get_mapped_ranges(self, start, count): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. This is a
+ generator which yields ranges of mapped blocks in the file. The ranges
+ are tuples of 2 elements: [first, last], where 'first' is the first
+ mapped block and 'last' is the last mapped block.
+
+ The ranges are yielded for the area of the file of size 'count' blocks,
+ starting from block 'start'.
+ """
+
+ raise Error("the method is not implemented")
+
+ def get_unmapped_ranges(self, start, count): # pylint: disable=W0613,R0201
+ """
+ This method has to be implemented by child classes. Just like
+ 'get_mapped_ranges()', but yields unmapped block ranges instead
+ (holes).
+ """
+
+ raise Error("the method is not implemented")
+
+
+# The 'SEEK_HOLE' and 'SEEK_DATA' options of the file seek system call
+_SEEK_DATA = 3
+_SEEK_HOLE = 4
+
+def _lseek(file_obj, offset, whence):
+ """This is a helper function which invokes 'os.lseek' for file object
+ 'file_obj' and with specified 'offset' and 'whence'. The 'whence'
+ argument is supposed to be either '_SEEK_DATA' or '_SEEK_HOLE'. When
+ there is no more data or hole starting from 'offset', this function
+ returns '-1'. Otherwise the data or hole position is returned."""
+
+ try:
+ return os.lseek(file_obj.fileno(), offset, whence)
+ except OSError as err:
+ # The 'lseek' system call returns the ENXIO if there is no data or
+ # hole starting from the specified offset.
+ if err.errno == os.errno.ENXIO:
+ return -1
+ elif err.errno == os.errno.EINVAL:
+ raise ErrorNotSupp("the kernel or file-system does not support "
+ "\"SEEK_HOLE\" and \"SEEK_DATA\"")
+ else:
+ raise
+
+class FilemapSeek(_FilemapBase):
+ """
+ This class uses 'SEEK_HOLE' and 'SEEK_DATA' to find the file block mapping.
+ Unfortunately, the current implementation requires the caller to have write
+ access to the image file.
+ """
+
+ def __init__(self, image, log=None):
+ """Refer the '_FilemapBase' class for the documentation."""
+
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapSeek: initializing")
+
+ self._probe_seek_hole()
+
+ def _probe_seek_hole(self):
+ """
+ Check whether the system implements 'SEEK_HOLE' and 'SEEK_DATA'.
+ Unfortunately, there seems to be no clean way for detecting this,
+ because often the system just fakes them by assuming that all
+ files are fully mapped, so 'SEEK_HOLE' always returns EOF and
+ 'SEEK_DATA' always returns the requested offset.
+
+ I could not invent a better way of detecting the fake 'SEEK_HOLE'
+ implementation than just to create a temporary file in the same
+ directory where the image file resides. It would be nice to change this
+ to something better.
+ """
+
+ directory = os.path.dirname(self._image_path)
+
+ try:
+ tmp_obj = tempfile.TemporaryFile("w+", dir=directory)
+ except IOError as err:
+ raise ErrorNotSupp("cannot create a temporary in \"%s\": %s"
+ % (directory, err))
+
+ try:
+ os.ftruncate(tmp_obj.fileno(), self.block_size)
+ except OSError as err:
+ raise ErrorNotSupp("cannot truncate temporary file in \"%s\": %s"
+ % (directory, err))
+
+ offs = _lseek(tmp_obj, 0, _SEEK_HOLE)
+ if offs != 0:
+ # We are dealing with the stub 'SEEK_HOLE' implementation which
+ # always returns EOF.
+ self._log.debug("lseek(0, SEEK_HOLE) returned %d" % offs)
+ raise ErrorNotSupp("the file-system does not support "
+ "\"SEEK_HOLE\" and \"SEEK_DATA\" but only "
+ "provides a stub implementation")
+
+ tmp_obj.close()
+
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ offs = _lseek(self._f_image, block * self.block_size, _SEEK_DATA)
+ if offs == -1:
+ result = False
+ else:
+ result = (offs // self.block_size == block)
+
+ self._log.debug("FilemapSeek: block_is_mapped(%d) returns %s"
+ % (block, result))
+ return result
+
+ def block_is_unmapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return not self.block_is_mapped(block)
+
+ def _get_ranges(self, start, count, whence1, whence2):
+ """
+ This function implements 'get_mapped_ranges()' and
+ 'get_unmapped_ranges()' depending on what is passed in the 'whence1'
+ and 'whence2' arguments.
+ """
+
+ assert whence1 != whence2
+ end = start * self.block_size
+ limit = end + count * self.block_size
+
+ while True:
+ start = _lseek(self._f_image, end, whence1)
+ if start == -1 or start >= limit or start == self.image_size:
+ break
+
+ end = _lseek(self._f_image, start, whence2)
+ if end == -1 or end == self.image_size:
+ end = self.blocks_cnt * self.block_size
+ if end > limit:
+ end = limit
+
+ start_blk = start // self.block_size
+ end_blk = end // self.block_size - 1
+ self._log.debug("FilemapSeek: yielding range (%d, %d)"
+ % (start_blk, end_blk))
+ yield (start_blk, end_blk)
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapSeek: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ return self._get_ranges(start, count, _SEEK_DATA, _SEEK_HOLE)
+
+ def get_unmapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapSeek: get_unmapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ return self._get_ranges(start, count, _SEEK_HOLE, _SEEK_DATA)
+
+
+# Below goes the FIEMAP ioctl implementation, which is not very readable
+# because it deals with the rather complex FIEMAP ioctl. To understand the
+# code, you need to know the FIEMAP interface, which is documented in the
+# "Documentation/filesystems/fiemap.txt" file in the Linux kernel sources.
+
+# Format string for 'struct fiemap'
+_FIEMAP_FORMAT = "=QQLLLL"
+# sizeof(struct fiemap)
+_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
+# Format string for 'struct fiemap_extent'
+_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
+# sizeof(struct fiemap_extent)
+_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
+# The FIEMAP ioctl number
+_FIEMAP_IOCTL = 0xC020660B
+# This FIEMAP ioctl flag which instructs the kernel to sync the file before
+# reading the block map
+_FIEMAP_FLAG_SYNC = 0x00000001
+# Size of the buffer for 'struct fiemap_extent' elements which will be used
+# when invoking the FIEMAP ioctl. The larger the buffer, the fewer times the
+# FIEMAP ioctl will be invoked.
+_FIEMAP_BUFFER_SIZE = 256 * 1024
+
+class FilemapFiemap(_FilemapBase):
+ """
+ This class provides an API to the FIEMAP ioctl. Namely, it allows iterating
+ over all mapped blocks and over all holes.
+
+ This class synchronizes the image file every time it invokes the FIEMAP
+ ioctl in order to work around bugs in early kernel FIEMAP implementations.
+ """
+
+ def __init__(self, image, log=None):
+ """
+ Initialize a class instance. The 'image' argument is the full path to
+ the file, or a file object, to operate on.
+ """
+
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapFiemap: initializing")
+
+ self._buf_size = _FIEMAP_BUFFER_SIZE
+
+ # Calculate how many 'struct fiemap_extent' elements fit the buffer
+ self._buf_size -= _FIEMAP_SIZE
+ self._fiemap_extent_cnt = self._buf_size // _FIEMAP_EXTENT_SIZE
+ assert self._fiemap_extent_cnt > 0
+ self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
+ self._buf_size += _FIEMAP_SIZE
+
+ # Allocate a mutable buffer for the FIEMAP ioctl
+ self._buf = array.array('B', [0] * self._buf_size)
+
+ # Check if the FIEMAP ioctl is supported
+ self.block_is_mapped(0)
+
+ def _invoke_fiemap(self, block, count):
+ """
+ Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
+ block number 'block'.
+
+ The full result of the operation is stored in 'self._buf' on exit.
+ Returns the unpacked 'struct fiemap' data structure in form of a python
+ list (just like 'struct.unpack()').
+ """
+
+ if self.blocks_cnt != 0 and (block < 0 or block >= self.blocks_cnt):
+ raise Error("bad block number %d, should be within [0, %d]"
+ % (block, self.blocks_cnt))
+
+ # Initialize the 'struct fiemap' part of the buffer. We use the
+ # '_FIEMAP_FLAG_SYNC' flag in order to make sure the file is
+ # synchronized. The reason for this is that early FIEMAP
+ # implementations had many bugs related to cached dirty data, and
+ # synchronizing the file is a necessary work-around.
+ struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
+ count * self.block_size, _FIEMAP_FLAG_SYNC, 0,
+ self._fiemap_extent_cnt, 0)
+
+ try:
+ fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
+ except IOError as err:
+ # Note, the FIEMAP ioctl is supported by the Linux kernel starting
+ # from version 2.6.28 (year 2008).
+ if err.errno == os.errno.EOPNOTSUPP:
+ errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
+ "by the file-system"
+ self._log.debug(errstr)
+ raise ErrorNotSupp(errstr)
+ if err.errno == os.errno.ENOTTY:
+ errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
+ "by the kernel"
+ self._log.debug(errstr)
+ raise ErrorNotSupp(errstr)
+ raise Error("the FIEMAP ioctl failed for '%s': %s"
+ % (self._image_path, err))
+
+ return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])
+
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ struct_fiemap = self._invoke_fiemap(block, 1)
+
+ # The 3rd element of 'struct_fiemap' is the 'fm_mapped_extents' field.
+ # If it contains zero, the block is not mapped, otherwise it is
+ # mapped.
+ result = bool(struct_fiemap[3])
+ self._log.debug("FilemapFiemap: block_is_mapped(%d) returns %s"
+ % (block, result))
+ return result
+
+ def block_is_unmapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return not self.block_is_mapped(block)
+
+ def _unpack_fiemap_extent(self, index):
+ """
+ Unpack a 'struct fiemap_extent' structure object number 'index' from
+ the internal 'self._buf' buffer.
+ """
+
+ offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
+ return struct.unpack(_FIEMAP_EXTENT_FORMAT,
+ self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])
+
+ def _do_get_mapped_ranges(self, start, count):
+ """
+ Implements most of the functionality for the 'get_mapped_ranges()'
+ generator: invokes the FIEMAP ioctl, walks through the mapped extents
+ and yields mapped block ranges. However, the ranges may be consecutive
+ (e.g., (1, 100), (101, 200)) and 'get_mapped_ranges()' simply merges
+ them.
+ """
+
+ block = start
+ while block < start + count:
+ struct_fiemap = self._invoke_fiemap(block, count)
+
+ mapped_extents = struct_fiemap[3]
+ if mapped_extents == 0:
+ # No more mapped blocks
+ return
+
+ extent = 0
+ while extent < mapped_extents:
+ fiemap_extent = self._unpack_fiemap_extent(extent)
+
+ # Start of the extent
+ extent_start = fiemap_extent[0]
+ # Starting block number of the extent
+ extent_block = extent_start // self.block_size
+ # Length of the extent
+ extent_len = fiemap_extent[2]
+ # Count of blocks in the extent
+ extent_count = extent_len // self.block_size
+
+ # Extent length and offset have to be block-aligned
+ assert extent_start % self.block_size == 0
+ assert extent_len % self.block_size == 0
+
+ if extent_block > start + count - 1:
+ return
+
+ first = max(extent_block, block)
+ last = min(extent_block + extent_count, start + count) - 1
+ yield (first, last)
+
+ extent += 1
+
+ block = extent_block + extent_count
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapFiemap: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ iterator = self._do_get_mapped_ranges(start, count)
+ first_prev, last_prev = next(iterator)
+
+ for first, last in iterator:
+ if last_prev == first - 1:
+ last_prev = last
+ else:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (first_prev, last_prev))
+ yield (first_prev, last_prev)
+ first_prev, last_prev = first, last
+
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (first_prev, last_prev))
+ yield (first_prev, last_prev)
+
+ def get_unmapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapFiemap: get_unmapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ hole_first = start
+ for first, last in self._do_get_mapped_ranges(start, count):
+ if first > hole_first:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (hole_first, first - 1))
+ yield (hole_first, first - 1)
+
+ hole_first = last + 1
+
+ if hole_first < start + count:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (hole_first, start + count - 1))
+ yield (hole_first, start + count - 1)
+
+def filemap(image, log=None):
+ """
+ Create and return an instance of a Filemap class - 'FilemapFiemap' or
+ 'FilemapSeek', depending on what the system we run on supports. If the
+ FIEMAP ioctl is supported, an instance of the 'FilemapFiemap' class is
+ returned. Otherwise, if 'SEEK_HOLE' is supported an instance of the
+ 'FilemapSeek' class is returned. If neither is supported, the function
+ raises an exception.
+ """
+
+ try:
+ return FilemapFiemap(image, log)
+ except ErrorNotSupp:
+ return FilemapSeek(image, log)
+
+def sparse_copy(src_fname, dst_fname, offset=0, skip=0):
+ """Efficiently copy sparse file to or into another file."""
+ fmap = filemap(src_fname)
+ try:
+ dst_file = open(dst_fname, 'r+b')
+ except IOError:
+ dst_file = open(dst_fname, 'wb')
+
+ for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
+ start = first * fmap.block_size
+ end = (last + 1) * fmap.block_size
+
+ if start < skip < end:
+ start = skip
+
+ fmap._f_image.seek(start, os.SEEK_SET)
+ dst_file.seek(offset + start, os.SEEK_SET)
+
+ chunk_size = 1024 * 1024
+ to_read = end - start
+ read = 0
+
+ while read < to_read:
+ if read + chunk_size > to_read:
+ chunk_size = to_read - read
+ chunk = fmap._f_image.read(chunk_size)
+ dst_file.write(chunk)
+ read += chunk_size
+ dst_file.close()
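The new filemap module above is self-contained; the following is a minimal usage sketch (the image paths are illustrative only, not part of this change). filemap() picks the FIEMAP- or seek-based implementation for you, and sparse_copy() copies only the mapped blocks:

    from wic.filemap import filemap, sparse_copy, ErrorNotSupp

    try:
        # filemap() returns a FilemapFiemap or FilemapSeek instance,
        # whichever the kernel and file-system support.
        fmap = filemap("disk.img")
        for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
            print("mapped blocks %d-%d" % (first, last))
    except ErrorNotSupp as err:
        print("no block map support: %s" % err)

    # Copy only the mapped blocks into another file, skipping the holes:
    sparse_copy("disk.img", "copy.img")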
diff --git a/import-layers/yocto-poky/scripts/lib/wic/help.py b/import-layers/yocto-poky/scripts/lib/wic/help.py
index 405d25a87..e5347ec4b 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/help.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/help.py
@@ -45,7 +45,7 @@ def display_help(subcommand, subcommands):
if callable(hlp):
hlp = hlp()
pager = subprocess.Popen('less', stdin=subprocess.PIPE)
- pager.communicate(hlp)
+ pager.communicate(hlp.encode('utf-8'))
return True
@@ -55,7 +55,7 @@ def wic_help(args, usage_str, subcommands):
Subcommand help dispatcher.
"""
if len(args) == 1 or not display_help(args[1], subcommands):
- print usage_str
+ print(usage_str)
def get_wic_plugins_help():
@@ -66,7 +66,7 @@ def get_wic_plugins_help():
result = wic_plugins_help
for plugin_type in PLUGIN_TYPES:
result += '\n\n%s PLUGINS\n\n' % plugin_type.upper()
- for name, plugin in pluginmgr.get_plugins(plugin_type).iteritems():
+ for name, plugin in pluginmgr.get_plugins(plugin_type).items():
result += "\n %s plugin:\n" % name
if plugin.__doc__:
result += plugin.__doc__
@@ -152,7 +152,7 @@ SYNOPSIS
[-e | --image-name] [-s, --skip-build-check] [-D, --debug]
[-r, --rootfs-dir] [-b, --bootimg-dir]
[-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
- [-c, --compress-with]
+ [-c, --compress-with] [-m, --bmap]
DESCRIPTION
This command creates an OpenEmbedded image based on the 'OE
@@ -221,6 +221,9 @@ DESCRIPTION
The -c option is used to specify compressor utility to compress
an image. gzip, bzip2 and xz compressors are supported.
+
+ The -m option is used to produce a .bmap file for the image. This file
+ can be used to flash the image using the bmaptool utility.
"""
wic_list_usage = """
@@ -738,6 +741,10 @@ DESCRIPTION
in bootloader configuration before running wic. In this case .wks file can
be generated or modified to set a preconfigured partition UUID using this option.
+ --system-id: This option is specific to wic. It specifies the partition system id. It's
+ useful for hardware that requires non-default partition system ids. The parameter
+ is a one byte long hex number, either with the 0x prefix or without it.
+
* bootloader
This command allows the user to specify various bootloader
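As a rough illustration of the two new user-facing knobs documented above (image, kickstart, and device names are hypothetical):

    # Produce a .bmap file alongside the image, then flash it with bmaptool:
    wic create directdisk -e core-image-minimal -m
    bmaptool copy --bmap directdisk.direct.bmap directdisk.direct /dev/sdX

    # .wks fragment requesting a non-default partition system id (0x83 = Linux):
    part / --source rootfs --fstype=ext4 --system-id 0x83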
diff --git a/import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py b/import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py
index 760cf8a58..1a52dd8b4 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py
@@ -16,7 +16,6 @@
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-from __future__ import with_statement
import os
import tempfile
import shutil
@@ -25,7 +24,7 @@ from wic import msger
from wic.utils.errors import CreatorError
from wic.utils import runner
-class BaseImageCreator(object):
+class BaseImageCreator():
"""Base class for image creation.
BaseImageCreator is the simplest creator class available; it will
@@ -68,7 +67,7 @@ class BaseImageCreator(object):
}
# update setting from createopts
- for key in createopts.keys():
+ for key in createopts:
if key in optmap:
option = optmap[key]
else:
diff --git a/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py b/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py
index a1b424965..edf5e5d22 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py
@@ -26,20 +26,40 @@
import os
import shutil
+import uuid
from wic import msger
-from wic.utils import fs_related
from wic.utils.oe.misc import get_bitbake_var
from wic.utils.partitionedfs import Image
from wic.utils.errors import CreatorError, ImageError
from wic.imager.baseimager import BaseImageCreator
from wic.plugin import pluginmgr
-from wic.utils.oe.misc import exec_cmd
+from wic.utils.oe.misc import exec_cmd, exec_native_cmd
disk_methods = {
"do_install_disk":None,
}
+class DiskImage():
+ """
+ A Disk backed by a file.
+ """
+ def __init__(self, device, size):
+ self.size = size
+ self.device = device
+ self.created = False
+
+ def exists(self):
+ return os.path.exists(self.device)
+
+ def create(self):
+ if self.created:
+ return
+ # create sparse disk image
+ cmd = "truncate %s -s %s" % (self.device, self.size)
+ exec_cmd(cmd)
+ self.created = True
+
class DirectImageCreator(BaseImageCreator):
"""
Installs a system into a file containing a partitioned disk image.
@@ -52,7 +72,8 @@ class DirectImageCreator(BaseImageCreator):
"""
def __init__(self, oe_builddir, image_output_dir, rootfs_dir, bootimg_dir,
- kernel_dir, native_sysroot, compressor, creatoropts=None):
+ kernel_dir, native_sysroot, compressor, creatoropts=None,
+ bmap=False):
"""
Initialize a DirectImageCreator instance.
@@ -74,6 +95,7 @@ class DirectImageCreator(BaseImageCreator):
self.kernel_dir = kernel_dir
self.native_sysroot = native_sysroot
self.compressor = compressor
+ self.bmap = bmap
def __get_part_num(self, num, parts):
"""calculate the real partition number, accounting for partitions not
@@ -221,12 +243,23 @@ class DirectImageCreator(BaseImageCreator):
self.__image = Image(self.native_sysroot)
- for part in parts:
+ disk_ids = {}
+ for num, part in enumerate(parts, 1):
# as a convenience, set source to the boot partition source
# instead of forcing it to be set via bootloader --source
if not self.ks.bootloader.source and part.mountpoint == "/boot":
self.ks.bootloader.source = part.source
+ # generate partition UUIDs
+ if not part.uuid and part.use_uuid:
+ if self.ptable_format == 'gpt':
+ part.uuid = str(uuid.uuid4())
+ else: # msdos partition table
+ if part.disk not in disk_ids:
+ disk_ids[part.disk] = int.from_bytes(os.urandom(4), 'little')
+ disk_id = disk_ids[part.disk]
+ part.uuid = '%0x-%02d' % (disk_id, self.__get_part_num(num, parts))
+
fstab_path = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
shutil.rmtree(self.workdir)
@@ -267,7 +300,8 @@ class DirectImageCreator(BaseImageCreator):
align=part.align,
no_table=part.no_table,
part_type=part.part_type,
- uuid=part.uuid)
+ uuid=part.uuid,
+ system_id=part.system_id)
if fstab_path:
shutil.move(fstab_path + ".orig", fstab_path)
@@ -279,9 +313,9 @@ class DirectImageCreator(BaseImageCreator):
full_path = self._full_path(self.__imgdir, disk_name, "direct")
msger.debug("Adding disk %s as %s with size %s bytes" \
% (disk_name, full_path, disk['min_size']))
- disk_obj = fs_related.DiskImage(full_path, disk['min_size'])
+ disk_obj = DiskImage(full_path, disk['min_size'])
self.__disks[disk_name] = disk_obj
- self.__image.add_disk(disk_name, disk_obj)
+ self.__image.add_disk(disk_name, disk_obj, disk_ids.get(disk_name))
self.__image.create()
@@ -313,12 +347,17 @@ class DirectImageCreator(BaseImageCreator):
self.bootimg_dir,
self.kernel_dir,
self.native_sysroot)
- # Compress the image
- if self.compressor:
- for disk_name, disk in self.__image.disks.items():
- full_path = self._full_path(self.__imgdir, disk_name, "direct")
- msger.debug("Compressing disk %s with %s" % \
- (disk_name, self.compressor))
+
+ for disk_name, disk in self.__image.disks.items():
+ full_path = self._full_path(self.__imgdir, disk_name, "direct")
+ # Generate .bmap
+ if self.bmap:
+ msger.debug("Generating bmap file for %s" % disk_name)
+ exec_native_cmd("bmaptool create %s -o %s.bmap" % (full_path, full_path),
+ self.native_sysroot)
+ # Compress the image
+ if self.compressor:
+ msger.debug("Compressing disk %s with %s" % (disk_name, self.compressor))
exec_cmd("%s %s" % (self.compressor, full_path))
def print_outimage_info(self):
@@ -375,6 +414,6 @@ class DirectImageCreator(BaseImageCreator):
if not self.__image is None:
try:
self.__image.cleanup()
- except ImageError, err:
+ except ImageError as err:
msger.warning("%s" % err)
diff --git a/import-layers/yocto-poky/scripts/lib/wic/ksparser.py b/import-layers/yocto-poky/scripts/lib/wic/ksparser.py
index 8c3f80882..0894e2b19 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/ksparser.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/ksparser.py
@@ -51,7 +51,7 @@ def sizetype(arg):
Converts size string in <num>[K|k|M|G] format into the integer value
"""
if arg.isdigit():
- return int(arg) * 1024L
+ return int(arg) * 1024
if not arg[:-1].isdigit():
raise ArgumentTypeError("Invalid size: %r" % arg)
@@ -60,9 +60,9 @@ def sizetype(arg):
if arg.endswith("k") or arg.endswith("K"):
return size
if arg.endswith("M"):
- return size * 1024L
+ return size * 1024
if arg.endswith("G"):
- return size * 1024L * 1024L
+ return size * 1024 * 1024
raise ArgumentTypeError("Invalid size: %r" % arg)
@@ -92,7 +92,25 @@ def cannedpathtype(arg):
raise ArgumentTypeError("file not found: %s" % arg)
return result
-class KickStart(object):
+def systemidtype(arg):
+ """
+ Custom type for ArgumentParser
+ Checks if the argument satisfies system id requirements,
+ i.e. if it's a one byte long integer > 0
+ """
+ error = "Invalid system type: %s. must be hex "\
+ "between 0x1 and 0xFF" % arg
+ try:
+ result = int(arg, 16)
+ except ValueError:
+ raise ArgumentTypeError(error)
+
+ if result <= 0 or result > 0xff:
+ raise ArgumentTypeError(error)
+
+ return arg
+
+class KickStart():
""""Kickstart parser implementation."""
def __init__(self, confpath):
@@ -106,10 +124,10 @@ class KickStart(object):
subparsers = parser.add_subparsers()
part = subparsers.add_parser('part')
- part.add_argument('mountpoint')
+ part.add_argument('mountpoint', nargs='?')
part.add_argument('--active', action='store_true')
part.add_argument('--align', type=int)
- part.add_argument("--extra-space", type=sizetype, default=10*1024L)
+ part.add_argument("--extra-space", type=sizetype, default=10*1024)
part.add_argument('--fsoptions', dest='fsopts')
part.add_argument('--fstype')
part.add_argument('--label')
@@ -121,6 +139,7 @@ class KickStart(object):
part.add_argument('--size', type=sizetype, default=0)
part.add_argument('--source')
part.add_argument('--sourceparams')
+ part.add_argument('--system-id', type=systemidtype)
part.add_argument('--use-uuid', action='store_true')
part.add_argument('--uuid')
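To make the parser changes concrete, here is how the two custom argparse types behave (a sketch; the values are chosen for illustration, and sizes are returned in kB):

    from wic.ksparser import sizetype, systemidtype

    sizetype('16')        # plain number is megabytes: 16 * 1024 -> 16384
    sizetype('512K')      # -> 512
    sizetype('2G')        # -> 2 * 1024 * 1024 = 2097152
    systemidtype('0x83')  # valid; returned unchanged as a string
    systemidtype('300')   # raises ArgumentTypeError: 0x300 is not one byte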
diff --git a/import-layers/yocto-poky/scripts/lib/wic/msger.py b/import-layers/yocto-poky/scripts/lib/wic/msger.py
index b73755422..fb8336d94 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/msger.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/msger.py
@@ -21,18 +21,14 @@ import sys
import re
import time
-__ALL__ = ['set_mode',
- 'get_loglevel',
+__ALL__ = ['get_loglevel',
'set_loglevel',
'set_logfile',
- 'raw',
'debug',
'verbose',
'info',
'warning',
'error',
- 'ask',
- 'pause',
]
# COLORs in ANSI
@@ -70,10 +66,6 @@ def _general_print(head, color, msg=None, stream=None, level='normal'):
# skip
return
- # encode raw 'unicode' str to utf8 encoded str
- if msg and isinstance(msg, unicode):
- msg = msg.encode('utf-8', 'ignore')
-
errormsg = ''
if CATCHERR_BUFFILE_FD > 0:
size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
@@ -122,9 +114,6 @@ def _color_print(head, color, msg, stream, level):
newline = True
if msg is not None:
- if isinstance(msg, unicode):
- msg = msg.encode('utf8', 'ignore')
-
stream.write('%s%s' % (head, msg))
if newline:
stream.write('\n')
@@ -159,7 +148,7 @@ def _split_msg(head, msg):
return head, msg
def get_loglevel():
- return (k for k, v in LOG_LEVELS.items() if v == LOG_LEVEL).next()
+ return next((k for k, v in LOG_LEVELS.items() if v == LOG_LEVEL))
def set_loglevel(level):
global LOG_LEVEL
@@ -182,9 +171,6 @@ def log(msg=''):
if msg:
LOG_CONTENT += msg
-def raw(msg=''):
- _general_print('', NO_COLOR, msg)
-
def info(msg):
head, msg = _split_msg('Info', msg)
_general_print(head, INFO_COLOR, msg)
@@ -206,66 +192,6 @@ def error(msg):
_color_perror(head, ERR_COLOR, msg)
sys.exit(1)
-def ask(msg, default=True):
- _general_print('\rQ', ASK_COLOR, '')
- try:
- if default:
- msg += '(Y/n) '
- else:
- msg += '(y/N) '
- if INTERACTIVE:
- while True:
- repl = raw_input(msg)
- if repl.lower() == 'y':
- return True
- elif repl.lower() == 'n':
- return False
- elif not repl.strip():
- # <Enter>
- return default
-
- # else loop
- else:
- if default:
- msg += ' Y'
- else:
- msg += ' N'
- _general_print('', NO_COLOR, msg)
-
- return default
- except KeyboardInterrupt:
- sys.stdout.write('\n')
- sys.exit(2)
-
-def choice(msg, choices, default=0):
- if default >= len(choices):
- return None
- _general_print('\rQ', ASK_COLOR, '')
- try:
- msg += " [%s] " % '/'.join(choices)
- if INTERACTIVE:
- while True:
- repl = raw_input(msg)
- if repl in choices:
- return repl
- elif not repl.strip():
- return choices[default]
- else:
- msg += choices[default]
- _general_print('', NO_COLOR, msg)
-
- return choices[default]
- except KeyboardInterrupt:
- sys.stdout.write('\n')
- sys.exit(2)
-
-def pause(msg=None):
- if INTERACTIVE:
- _general_print('\rQ', ASK_COLOR, '')
- if msg is None:
- msg = 'press <ENTER> to continue ...'
- raw_input(msg)
-
def set_logfile(fpath):
global LOG_FILE_FP
diff --git a/import-layers/yocto-poky/scripts/lib/wic/partition.py b/import-layers/yocto-poky/scripts/lib/wic/partition.py
index f40d1bc8b..90f65a1e3 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/partition.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/partition.py
@@ -26,7 +26,6 @@
import os
import tempfile
-import uuid
from wic.utils.oe.misc import msger, parse_sourceparams
from wic.utils.oe.misc import exec_cmd, exec_native_cmd
@@ -38,7 +37,7 @@ partition_methods = {
"do_configure_partition":None,
}
-class Partition(object):
+class Partition():
def __init__(self, args, lineno):
self.args = args
@@ -57,10 +56,9 @@ class Partition(object):
self.size = args.size
self.source = args.source
self.sourceparams = args.sourceparams
+ self.system_id = args.system_id
self.use_uuid = args.use_uuid
self.uuid = args.uuid
- if args.use_uuid and not self.uuid:
- self.uuid = str(uuid.uuid4())
self.lineno = lineno
self.source_file = ""
@@ -219,9 +217,7 @@ class Partition(object):
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, self.mountpoint, rootfs_size))
- dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0 bs=1k" % \
- (rootfs, rootfs_size)
- exec_cmd(dd_cmd)
+ exec_cmd("truncate %s -s %d" % (rootfs, rootfs_size * 1024))
extra_imagecmd = "-i 8192"
@@ -254,9 +250,7 @@ class Partition(object):
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, self.mountpoint, rootfs_size))
- dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=0 bs=1k" % \
- (rootfs, rootfs_size)
- exec_cmd(dd_cmd)
+ exec_cmd("truncate %s -s %d" % (rootfs, rootfs_size * 1024))
label_str = ""
if self.label:
@@ -284,14 +278,6 @@ class Partition(object):
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, self.mountpoint, blocks))
- # Ensure total sectors is an integral number of sectors per
- # track or mcopy will complain. Sectors are 512 bytes, and we
- # generate images with 32 sectors per track. This calculation
- # is done in blocks, thus the mod by 16 instead of 32. Apply
- # sector count fix only when needed.
- if blocks % 16 != 0:
- blocks += (16 - (blocks % 16))
-
label_str = "-n boot"
if self.label:
label_str = "-n %s" % self.label
@@ -319,9 +305,7 @@ class Partition(object):
"""
Prepare an empty ext2/3/4 partition.
"""
- dd_cmd = "dd if=/dev/zero of=%s bs=1k seek=%d count=0" % \
- (rootfs, self.size)
- exec_cmd(dd_cmd)
+ exec_cmd("truncate %s -s %d" % (rootfs, self.size * 1024))
extra_imagecmd = "-i 8192"
@@ -338,9 +322,7 @@ class Partition(object):
"""
Prepare an empty btrfs partition.
"""
- dd_cmd = "dd if=/dev/zero of=%s bs=1k seek=%d count=0" % \
- (rootfs, self.size)
- exec_cmd(dd_cmd)
+ exec_cmd("truncate %s -s %d" % (rootfs, self.size * 1024))
label_str = ""
if self.label:
@@ -401,9 +383,7 @@ class Partition(object):
"""
path = "%s/fs.%s" % (cr_workdir, self.fstype)
- dd_cmd = "dd if=/dev/zero of=%s bs=1k seek=%d count=0" % \
- (path, self.size)
- exec_cmd(dd_cmd)
+ exec_cmd("truncate %s -s %d" % (path, self.size * 1024))
import uuid
label_str = ""
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugin.py b/import-layers/yocto-poky/scripts/lib/wic/plugin.py
index ccfdfcb93..306b32437 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugin.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugin.py
@@ -29,7 +29,7 @@ PLUGIN_TYPES = ["imager", "source"]
PLUGIN_DIR = "/lib/wic/plugins" # relative to scripts
SCRIPTS_PLUGIN_DIR = "scripts" + PLUGIN_DIR
-class PluginMgr(object):
+class PluginMgr():
plugin_dirs = {}
# make the manager class as singleton
@@ -42,7 +42,7 @@ class PluginMgr(object):
def __init__(self):
wic_path = os.path.dirname(__file__)
- eos = wic_path.find('scripts') + len('scripts')
+ eos = wic_path.rfind('scripts') + len('scripts')
scripts_path = wic_path[:eos]
self.scripts_path = scripts_path
self.plugin_dir = scripts_path + PLUGIN_DIR
@@ -81,7 +81,7 @@ class PluginMgr(object):
# the value True/False means "loaded"
def _load_all(self):
- for (pdir, loaded) in self.plugin_dirs.iteritems():
+ for (pdir, loaded) in self.plugin_dirs.items():
if loaded:
continue
@@ -97,7 +97,7 @@ class PluginMgr(object):
self.plugin_dirs[pdir] = True
msger.debug("Plugin module %s:%s imported"\
% (mod, pymod.__file__))
- except ImportError, err:
+ except ImportError as err:
msg = 'Failed to load plugin %s/%s: %s' \
% (os.path.basename(pdir), mod, err)
msger.warning(msg)
@@ -135,9 +135,9 @@ class PluginMgr(object):
None is returned.
"""
return_methods = None
- for _source_name, klass in self.get_plugins('source').iteritems():
+ for _source_name, klass in self.get_plugins('source').items():
if _source_name == source_name:
- for _method_name in methods.keys():
+ for _method_name in methods:
if not hasattr(klass, _method_name):
msger.warning("Unimplemented %s source interface for: %s"\
% (_method_name, _source_name))
diff --git a/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py b/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py
index ee8fe95c6..e737dee7b 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py
@@ -15,34 +15,26 @@
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-from wic import msger
-
-class _Plugin(object):
- class __metaclass__(type):
- def __init__(cls, name, bases, attrs):
- if not hasattr(cls, 'plugins'):
- cls.plugins = {}
-
- elif 'wic_plugin_type' in attrs:
- if attrs['wic_plugin_type'] not in cls.plugins:
- cls.plugins[attrs['wic_plugin_type']] = {}
+__all__ = ['ImagerPlugin', 'SourcePlugin', 'get_plugins']
- elif hasattr(cls, 'wic_plugin_type') and 'name' in attrs:
- cls.plugins[cls.wic_plugin_type][attrs['name']] = cls
+import sys
+from collections import defaultdict
- def show_plugins(cls):
- for cls in cls.plugins[cls.wic_plugin_type]:
- print cls
+from wic import msger
- def get_plugins(cls):
- return cls.plugins
+class PluginMeta(type):
+ plugins = defaultdict(dict)
+ def __new__(cls, name, bases, attrs):
+ class_type = type.__new__(cls, name, bases, attrs)
+ if 'name' in attrs:
+ cls.plugins[class_type.wic_plugin_type][attrs['name']] = class_type
+ return class_type
-class ImagerPlugin(_Plugin):
+class ImagerPlugin(PluginMeta("Plugin", (), {})):
wic_plugin_type = "imager"
-
-class SourcePlugin(_Plugin):
+class SourcePlugin(PluginMeta("Plugin", (), {})):
wic_plugin_type = "source"
"""
The methods that can be implemented by --source plugins.
@@ -99,10 +91,4 @@ class SourcePlugin(_Plugin):
msger.debug("SourcePlugin: do_prepare_partition: part: %s" % part)
def get_plugins(typen):
- plugins = ImagerPlugin.get_plugins()
- if typen in plugins:
- return plugins[typen]
- else:
- return None
-
-__all__ = ['ImagerPlugin', 'SourcePlugin', 'get_plugins']
+ return PluginMeta.plugins.get(typen)
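A minimal sketch of the reworked registration mechanism (the plugin class and its name are invented for illustration): defining a subclass with a 'name' attribute is all PluginMeta needs to make it discoverable.

    from wic.pluginbase import SourcePlugin, get_plugins

    class MySourcePlugin(SourcePlugin):
        name = 'my-source'  # presence of 'name' triggers registration

    assert get_plugins('source')['my-source'] is MySourcePlugin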
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py
index 6d3f46cc6..8fe393080 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py
@@ -86,7 +86,8 @@ class DirectPlugin(ImagerPlugin):
kernel_dir,
native_sysroot,
compressor,
- creatoropts)
+ creatoropts,
+ opts.bmap)
try:
creator.create()
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py
index a4734c9b3..8bc362254 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -84,7 +84,7 @@ class BootimgEFIPlugin(SourcePlugin):
@classmethod
def do_configure_gummiboot(cls, hdddir, creator, cr_workdir):
"""
- Create loader-specific (gummiboot) config
+ Create loader-specific systemd-boot/gummiboot config
"""
install_cmd = "install -d %s/loader" % hdddir
exec_cmd(install_cmd)
@@ -149,7 +149,8 @@ class BootimgEFIPlugin(SourcePlugin):
try:
if source_params['loader'] == 'grub-efi':
cls.do_configure_grubefi(hdddir, creator, cr_workdir)
- elif source_params['loader'] == 'gummiboot':
+ elif source_params['loader'] == 'gummiboot' \
+ or source_params['loader'] == 'systemd-boot':
cls.do_configure_gummiboot(hdddir, creator, cr_workdir)
else:
msger.error("unrecognized bootimg-efi loader: %s" % source_params['loader'])
@@ -189,7 +190,8 @@ class BootimgEFIPlugin(SourcePlugin):
exec_cmd(cp_cmd, True)
shutil.move("%s/grub.cfg" % cr_workdir,
"%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)
- elif source_params['loader'] == 'gummiboot':
+ elif source_params['loader'] == 'gummiboot' \
+ or source_params['loader'] == 'systemd-boot':
cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (bootimg_dir, hdddir)
exec_cmd(cp_cmd, True)
else:
@@ -197,6 +199,11 @@ class BootimgEFIPlugin(SourcePlugin):
except KeyError:
msger.error("bootimg-efi requires a loader, none specified")
+ startup = os.path.join(bootimg_dir, "startup.nsh")
+ if os.path.exists(startup):
+ cp_cmd = "cp %s %s/" % (startup, hdddir)
+ exec_cmd(cp_cmd, True)
+
du_cmd = "du -bks %s" % hdddir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
@@ -211,12 +218,6 @@ class BootimgEFIPlugin(SourcePlugin):
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, part.mountpoint, blocks))
- # Ensure total sectors is an integral number of sectors per
- # track or mcopy will complain. Sectors are 512 bytes, and we
- # generate images with 32 sectors per track. This calculation is
- # done in blocks, thus the mod by 16 instead of 32.
- blocks += (16 - (blocks % 16))
-
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index 5b719bf3b..f204daa32 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -179,12 +179,6 @@ class BootimgPcbiosPlugin(SourcePlugin):
msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
(extra_blocks, part.mountpoint, blocks))
- # Ensure total sectors is an integral number of sectors per
- # track or mcopy will complain. Sectors are 512 bytes, and we
- # generate images with 32 sectors per track. This calculation is
- # done in blocks, thus the mod by 16 instead of 32.
- blocks += (16 - (blocks % 16))
-
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index bc9928314..3858fd439 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -27,6 +27,7 @@ import glob
from wic import msger
from wic.pluginbase import SourcePlugin
+from wic.utils.misc import get_custom_config
from wic.utils.oe.misc import exec_cmd, exec_native_cmd, get_bitbake_var
class IsoImagePlugin(SourcePlugin):
@@ -59,7 +60,7 @@ class IsoImagePlugin(SourcePlugin):
"""
Create loader-specific (syslinux) config
"""
- splash = os.path.join(cr_workdir, "/ISO/boot/splash.jpg")
+ splash = os.path.join(cr_workdir, "ISO/boot/splash.jpg")
if os.path.exists(splash):
splashline = "menu background splash.jpg"
else:
@@ -94,33 +95,43 @@ class IsoImagePlugin(SourcePlugin):
"""
Create loader-specific (grub-efi) config
"""
- splash = os.path.join(cr_workdir, "/EFI/boot/splash.jpg")
- if os.path.exists(splash):
- splashline = "menu background splash.jpg"
+ configfile = creator.ks.bootloader.configfile
+ if configfile:
+ grubefi_conf = get_custom_config(configfile)
+ if grubefi_conf:
+ msger.debug("Using custom configuration file "
+ "%s for grub.cfg" % configfile)
+ else:
+ msger.error("configfile is specified but failed to "
+ "get it from %s." % configfile)
else:
- splashline = ""
+ splash = os.path.join(cr_workdir, "EFI/boot/splash.jpg")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
- bootloader = creator.ks.bootloader
+ bootloader = creator.ks.bootloader
- grubefi_conf = ""
- grubefi_conf += "serial --unit=0 --speed=115200 --word=8 "
- grubefi_conf += "--parity=no --stop=1\n"
- grubefi_conf += "default=boot\n"
- grubefi_conf += "timeout=%s\n" % (bootloader.timeout or 10)
- grubefi_conf += "\n"
- grubefi_conf += "search --set=root --label %s " % part.label
- grubefi_conf += "\n"
- grubefi_conf += "menuentry 'boot'{\n"
+ grubefi_conf = ""
+ grubefi_conf += "serial --unit=0 --speed=115200 --word=8 "
+ grubefi_conf += "--parity=no --stop=1\n"
+ grubefi_conf += "default=boot\n"
+ grubefi_conf += "timeout=%s\n" % (bootloader.timeout or 10)
+ grubefi_conf += "\n"
+ grubefi_conf += "search --set=root --label %s " % part.label
+ grubefi_conf += "\n"
+ grubefi_conf += "menuentry 'boot'{\n"
- kernel = "/bzImage"
+ kernel = "/bzImage"
- grubefi_conf += "linux %s rootwait %s\n" \
- % (kernel, bootloader.append)
- grubefi_conf += "initrd /initrd \n"
- grubefi_conf += "}\n"
+ grubefi_conf += "linux %s rootwait %s\n" \
+ % (kernel, bootloader.append)
+ grubefi_conf += "initrd /initrd \n"
+ grubefi_conf += "}\n"
- if splashline:
- grubefi_conf += "%s\n" % splashline
+ if splashline:
+ grubefi_conf += "%s\n" % splashline
msger.debug("Writing grubefi config %s/EFI/BOOT/grub.cfg" \
% cr_workdir)
@@ -430,12 +441,6 @@ class IsoImagePlugin(SourcePlugin):
% (part.mountpoint, blocks)
msger.debug(msg)
- # Ensure total sectors is an integral number of sectors per
- # track or mcopy will complain. Sectors are 512 bytes, and we
- # generate images with 32 sectors per track. This calculation is
- # done in blocks, thus the mod by 16 instead of 32.
- blocks += (16 - (blocks % 16))
-
# dosfs image for EFI boot
bootimg = "%s/efi.img" % isodir
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py
index 0472f536b..e0b11f95a 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py
@@ -20,6 +20,7 @@ import os
from wic import msger
from wic.pluginbase import SourcePlugin
from wic.utils.oe.misc import exec_cmd, get_bitbake_var
+from wic.filemap import sparse_copy
class RawCopyPlugin(SourcePlugin):
"""
@@ -67,14 +68,12 @@ class RawCopyPlugin(SourcePlugin):
return
src = os.path.join(bootimg_dir, source_params['file'])
- dst = os.path.join(cr_workdir, source_params['file'])
+ dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
if 'skip' in source_params:
- dd_cmd = "dd if=%s of=%s ibs=%s skip=1 conv=notrunc" % \
- (src, dst, source_params['skip'])
+ sparse_copy(src, dst, skip=source_params['skip'])
else:
- dd_cmd = "cp %s %s" % (src, dst)
- exec_cmd(dd_cmd)
+ sparse_copy(src, dst)
# get the size in the right units for kickstart (kB)
du_cmd = "du -Lbks %s" % dst
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/fs_related.py b/import-layers/yocto-poky/scripts/lib/wic/utils/fs_related.py
deleted file mode 100644
index 2e74461a4..000000000
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/fs_related.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2007, Red Hat, Inc.
-# Copyright (c) 2009, 2010, 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-from __future__ import with_statement
-import os
-import errno
-
-from wic.utils.oe.misc import exec_cmd
-
-def makedirs(dirname):
- """A version of os.makedirs() that doesn't throw an
- exception if the leaf directory already exists.
- """
- try:
- os.makedirs(dirname)
- except OSError, err:
- if err.errno != errno.EEXIST:
- raise
-
-class Disk:
- """
- Generic base object for a disk.
- """
- def __init__(self, size, device=None):
- self._device = device
- self._size = size
-
- def create(self):
- pass
-
- def cleanup(self):
- pass
-
- def get_device(self):
- return self._device
- def set_device(self, path):
- self._device = path
- device = property(get_device, set_device)
-
- def get_size(self):
- return self._size
- size = property(get_size)
-
-
-class DiskImage(Disk):
- """
- A Disk backed by a file.
- """
- def __init__(self, image_file, size):
- Disk.__init__(self, size)
- self.image_file = image_file
-
- def exists(self):
- return os.path.exists(self.image_file)
-
- def create(self):
- if self.device is not None:
- return
-
- blocks = self.size / 1024
- if self.size - blocks * 1024:
- blocks += 1
-
- # create disk image
- dd_cmd = "dd if=/dev/zero of=%s bs=1024 seek=%d count=1" % \
- (self.image_file, blocks)
- exec_cmd(dd_cmd)
-
- self.device = self.image_file
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py b/import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py
index 81239ac35..fe188c9d2 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py
@@ -28,12 +28,14 @@
import os
from collections import defaultdict
+from distutils import spawn
from wic import msger
from wic.utils import runner
# executable -> recipe pairs for exec_native_cmd
-NATIVE_RECIPES = {"mcopy": "mtools",
+NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+ "mcopy": "mtools",
"mkdosfs": "dosfstools",
"mkfs.btrfs": "btrfs-tools",
"mkfs.ext2": "e2fsprogs",
@@ -43,6 +45,7 @@ NATIVE_RECIPES = {"mcopy": "mtools",
"mksquashfs": "squashfs-tools",
"mkswap": "util-linux",
"parted": "parted",
+ "sfdisk": "util-linux",
"sgdisk": "gptfdisk",
"syslinux": "syslinux"
}
@@ -82,13 +85,6 @@ def exec_cmd(cmd_and_args, as_shell=False, catch=3):
return out
-def cmd_in_path(cmd, path):
- import scriptpath
-
- scriptpath.add_bitbake_lib_path()
-
- return bb.utils.which(path, cmd) != "" or False
-
def exec_native_cmd(cmd_and_args, native_sysroot, catch=3, pseudo=""):
"""
Execute native command, catching stderr, stdout
@@ -111,7 +107,7 @@ def exec_native_cmd(cmd_and_args, native_sysroot, catch=3, pseudo=""):
msger.debug("exec_native_cmd: %s" % cmd_and_args)
# If the command isn't in the native sysroot say we failed.
- if cmd_in_path(args[0], native_paths):
+ if spawn.find_executable(args[0], native_paths):
ret, out = _exec_cmd(native_cmd_and_args, True, catch)
else:
ret = 127
@@ -186,8 +182,8 @@ class BitbakeVars(defaultdict):
for line in varsfile:
self._parse_line(line, image)
else:
- print "Couldn't get bitbake variable from %s." % fname
- print "File %s doesn't exist." % fname
+ print("Couldn't get bitbake variable from %s." % fname)
+ print("File %s doesn't exist." % fname)
return
else:
# Get bitbake -e output
@@ -201,8 +197,8 @@ class BitbakeVars(defaultdict):
msger.set_loglevel(log_level)
if ret:
- print "Couldn't get '%s' output." % cmd
- print "Bitbake failed with error:\n%s\n" % lines
+ print("Couldn't get '%s' output." % cmd)
+ print("Bitbake failed with error:\n%s\n" % lines)
return
# Parse bitbake -e output
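The cmd_in_path() helper, which needed the bitbake libraries, is replaced by distutils. A small sketch of the substitute call, with an illustrative search path:

    from distutils import spawn

    # Returns the absolute path of the executable if found under the given
    # colon-separated search path, or None otherwise.
    native_paths = "/path/to/sysroot/usr/bin:/path/to/sysroot/bin"
    print(spawn.find_executable("mkdosfs", native_paths))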
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py b/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py
index ad596d26f..cb03009fc 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py
@@ -22,6 +22,7 @@ import os
from wic import msger
from wic.utils.errors import ImageError
from wic.utils.oe.misc import exec_cmd, exec_native_cmd
+from wic.filemap import sparse_copy
# Overhead of the MBR partitioning scheme (just one sector)
MBR_OVERHEAD = 1
@@ -32,7 +33,7 @@ GPT_OVERHEAD = 34
# Size of a sector in bytes
SECTOR_SIZE = 512
-class Image(object):
+class Image():
"""
Generic base object for an image.
@@ -42,6 +43,7 @@ class Image(object):
def __init__(self, native_sysroot=None):
self.disks = {}
self.partitions = []
+ self.partimages = []
# Size of a sector used in calculations
self.sector_size = SECTOR_SIZE
self._partitions_layed_out = False
@@ -66,15 +68,17 @@ class Image(object):
'offset': 0, # Offset of next partition (in sectors)
# Minimum required disk size to fit all partitions (in bytes)
'min_size': 0,
- 'ptable_format': "msdos"} # Partition table format
+ 'ptable_format': "msdos", # Partition table format
+ 'identifier': None} # Disk system identifier
- def add_disk(self, disk_name, disk_obj):
+ def add_disk(self, disk_name, disk_obj, identifier):
""" Add a disk object which have to be partitioned. More than one disk
can be added. In case of multiple disks, disk partitions have to be
added for each disk separately with 'add_partition()". """
self.__add_disk(disk_name)
self.disks[disk_name]['disk'] = disk_obj
+ self.disks[disk_name]['identifier'] = identifier
def __add_partition(self, part):
""" This is a helper function for 'add_partition()' which adds a
@@ -87,14 +91,14 @@ class Image(object):
def add_partition(self, size, disk_name, mountpoint, source_file=None, fstype=None,
label=None, fsopts=None, boot=False, align=None, no_table=False,
- part_type=None, uuid=None):
+ part_type=None, uuid=None, system_id=None):
""" Add the next partition. Prtitions have to be added in the
first-to-last order. """
ks_pnum = len(self.partitions)
# Converting kB to sectors for parted
- size = size * 1024 / self.sector_size
+ size = size * 1024 // self.sector_size
part = {'ks_pnum': ks_pnum, # Partition number in the KS file
'size': size, # In sectors
@@ -110,7 +114,8 @@ class Image(object):
'align': align, # Partition alignment
'no_table' : no_table, # Partition does not appear in partition table
'part_type' : part_type, # Partition type
- 'uuid': uuid} # Partition UUID
+ 'uuid': uuid, # Partition UUID
+ 'system_id': system_id} # Partition system id
self.__add_partition(part)
@@ -130,7 +135,7 @@ class Image(object):
for num in range(len(self.partitions)):
part = self.partitions[num]
- if not self.disks.has_key(part['disk_name']):
+ if part['disk_name'] not in self.disks:
raise ImageError("No disk %s for partition %s" \
% (part['disk_name'], part['mountpoint']))
@@ -171,12 +176,12 @@ class Image(object):
# gaps we could enlarge the previous partition?
# Calc how much the alignment is off.
- align_sectors = disk['offset'] % (part['align'] * 1024 / self.sector_size)
+ align_sectors = disk['offset'] % (part['align'] * 1024 // self.sector_size)
if align_sectors:
# If partition is not aligned as required, we need
# to move forward to the next alignment point
- align_sectors = (part['align'] * 1024 / self.sector_size) - align_sectors
+ align_sectors = (part['align'] * 1024 // self.sector_size) - align_sectors
msger.debug("Realignment for %s%s with %s sectors, original"
" offset %s, target alignment is %sK." %
@@ -234,7 +239,7 @@ class Image(object):
def __format_disks(self):
self.layout_partitions()
- for dev in self.disks.keys():
+ for dev in self.disks:
disk = self.disks[dev]
msger.debug("Initializing partition table for %s" % \
(disk['disk'].device))
@@ -242,6 +247,12 @@ class Image(object):
(disk['disk'].device, disk['ptable_format']),
self.native_sysroot)
+ if disk['identifier']:
+ msger.debug("Set disk identifier %x" % disk['identifier'])
+ with open(disk['disk'].device, 'r+b') as img:
+ img.seek(0x1B8)
+ img.write(disk['identifier'].to_bytes(4, 'little'))
+
msger.debug("Creating partitions")
for part in self.partitions:
@@ -296,7 +307,7 @@ class Image(object):
(part['num'], part['part_type'],
disk['disk'].device), self.native_sysroot)
- if part['uuid']:
+ if part['uuid'] and disk['ptable_format'] == "gpt":
msger.debug("partition %d: set UUID to %s" % \
(part['num'], part['uuid']))
exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
@@ -310,6 +321,10 @@ class Image(object):
exec_native_cmd("parted -s %s set %d %s on" % \
(disk['disk'].device, part['num'], flag_name),
self.native_sysroot)
+ if part['system_id']:
+ exec_native_cmd("sfdisk --part-type %s %s %s" % \
+ (disk['disk'].device, part['num'], part['system_id']),
+ self.native_sysroot)
# Parted defaults to enabling the lba flag for fat16 partitions,
# which causes compatibility issues with some firmware (and really
@@ -330,6 +345,10 @@ class Image(object):
disk['disk'].cleanup()
except:
pass
+ # remove partition images
+ for image in self.partimages:
+ if os.path.isfile(image):
+ os.remove(image)
def assemble(self, image_file):
msger.debug("Installing partitions")
@@ -338,20 +357,19 @@ class Image(object):
source = part['source_file']
if source:
# install source_file contents into a partition
- cmd = "dd if=%s of=%s bs=%d seek=%d count=%d conv=notrunc" % \
- (source, image_file, self.sector_size,
- part['start'], part['size'])
- exec_cmd(cmd)
+ sparse_copy(source, image_file, part['start'] * self.sector_size)
msger.debug("Installed %s in partition %d, sectors %d-%d, "
"size %d sectors" % \
(source, part['num'], part['start'],
part['start'] + part['size'] - 1, part['size']))
- os.rename(source, image_file + '.p%d' % part['num'])
+ partimage = image_file + '.p%d' % part['num']
+ os.rename(source, partimage)
+ self.partimages.append(partimage)
def create(self):
- for dev in self.disks.keys():
+ for dev in self.disks:
disk = self.disks[dev]
disk['disk'].create()
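
Note on the 'identifier' hunk above: an MBR's 32-bit disk signature lives at byte
offset 0x1B8 and is stored little-endian, which is exactly what the new code
writes. A minimal, self-contained sketch of the same write (the image path and
identifier value are illustrative, not part of the patch):

    def write_disk_identifier(image_path, identifier):
        """Write a 32-bit MBR disk signature, mirroring the hunk above."""
        with open(image_path, 'r+b') as img:   # assumes the image already exists
            img.seek(0x1B8)                              # MBR disk signature offset
            img.write(identifier.to_bytes(4, 'little'))  # 4 bytes, little-endian

    write_disk_identifier('disk.img', 0x12345678)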
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py b/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py
index 7431917ff..db536ba58 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py
@@ -65,9 +65,9 @@ def runtool(cmdln_or_args, catch=1):
process = subprocess.Popen(cmdln_or_args, stdout=sout,
stderr=serr, shell=shell)
(sout, serr) = process.communicate()
- # combine stdout and stderr, filter None out
- out = ''.join(filter(None, [sout, serr]))
- except OSError, err:
+ # combine stdout and stderr, filter None out and decode
+ out = ''.join([out.decode('utf-8') for out in [sout, serr] if out])
+ except OSError as err:
if err.errno == 2:
# [Errno 2] No such file or directory
msger.error('Cannot run command: %s, lost dependency?' % cmd)
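
The runner.py change above reflects that under Python 3, communicate() returns
bytes (or None), so the streams must be filtered and decoded before joining.
The same pattern in isolation (function and argument names are illustrative):

    import subprocess

    def run_and_capture(args):
        proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = proc.communicate()  # bytes or None in Python 3
        # combine stdout and stderr, filter None out and decode
        return ''.join(s.decode('utf-8') for s in (sout, serr) if s)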
diff --git a/import-layers/yocto-poky/scripts/lnr b/import-layers/yocto-poky/scripts/lnr
index 9dacebe09..5fed780eb 100755
--- a/import-layers/yocto-poky/scripts/lnr
+++ b/import-layers/yocto-poky/scripts/lnr
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
# Create a *relative* symlink, just like ln --relative does but without needing
# coreutils 8.16.
@@ -6,7 +6,7 @@
import sys, os
if len(sys.argv) != 3:
- print "$ lnr TARGET LINK_NAME"
+ print("$ lnr TARGET LINK_NAME")
sys.exit(1)
target = sys.argv[1]
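
lnr's core computation (outside this hunk) amounts to resolving the target
relative to the link's directory and then symlinking. A sketch of the idea,
not the script's exact code:

    import os

    def lnr(target, link_name):
        # what `ln --relative` does, without needing coreutils 8.16
        rel = os.path.relpath(target, os.path.dirname(link_name) or '.')
        os.symlink(rel, link_name)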
diff --git a/import-layers/yocto-poky/scripts/oe-build-perf-test b/import-layers/yocto-poky/scripts/oe-build-perf-test
new file mode 100755
index 000000000..638e195ef
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/oe-build-perf-test
@@ -0,0 +1,211 @@
+#!/usr/bin/python3
+#
+# Build performance test script
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Build performance test script"""
+import argparse
+import errno
+import fcntl
+import logging
+import os
+import shutil
+import sys
+import unittest
+from datetime import datetime
+
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
+import scriptpath
+scriptpath.add_oe_lib_path()
+import oeqa.buildperf
+from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
+ BuildPerfTestRunner, KernelDropCaches)
+from oeqa.utils.commands import runCmd
+from oeqa.utils.git import GitRepo, GitError
+
+
+# Set-up logging
+LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'
+logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
+ datefmt='%Y-%m-%d %H:%M:%S')
+log = logging.getLogger()
+
+
+def acquire_lock(lock_f):
+ """Acquire flock on file"""
+ log.debug("Acquiring lock %s", os.path.abspath(lock_f.name))
+ try:
+ fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError as err:
+ if err.errno == errno.EAGAIN:
+ return False
+ raise
+ log.debug("Lock acquired")
+ return True
+
+
+def pre_run_sanity_check():
+ """Sanity check of build environment"""
+ build_dir = os.environ.get("BUILDDIR")
+ if not build_dir:
+ log.error("BUILDDIR not set. Please run the build environmnent setup "
+ "script.")
+ return False
+ if os.getcwd() != build_dir:
+ log.error("Please run this script under BUILDDIR (%s)", build_dir)
+ return False
+
+ ret = runCmd('which bitbake', ignore_status=True)
+ if ret.status:
+ log.error("bitbake command not found")
+ return False
+ return True
+
+def init_git_repo(path):
+ """Check/create Git repository where to store results"""
+ path = os.path.abspath(path)
+ if os.path.isfile(path):
+ log.error("Invalid Git repo %s: path exists but is not a directory", path)
+ return False
+ if not os.path.isdir(path):
+ try:
+ os.mkdir(path)
+ except (FileNotFoundError, PermissionError) as err:
+ log.error("Failed to mkdir %s: %s", path, err)
+ return False
+ if not os.listdir(path):
+ log.info("Initializing a new Git repo at %s", path)
+ GitRepo.init(path)
+ try:
+ GitRepo(path, is_topdir=True)
+ except GitError:
+ log.error("No Git repository but a non-empty directory found at %s.\n"
+ "Please specify a Git repository, an empty directory or "
+ "a non-existing directory", path)
+ return False
+ return True
+
+
+def setup_file_logging(log_file):
+ """Setup loggin to file"""
+ log_dir = os.path.dirname(log_file)
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+ formatter = logging.Formatter(LOG_FORMAT)
+ handler = logging.FileHandler(log_file)
+ handler.setFormatter(formatter)
+ log.addHandler(handler)
+
+
+def archive_build_conf(out_dir):
+ """Archive build/conf to test results"""
+ src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
+ tgt_dir = os.path.join(out_dir, 'build', 'conf')
+ os.makedirs(os.path.dirname(tgt_dir))
+ shutil.copytree(src_dir, tgt_dir)
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+ parser.add_argument('-D', '--debug', action='store_true',
+ help='Enable debug level logging')
+ parser.add_argument('--globalres-file',
+ type=os.path.abspath,
+ help="Append results to 'globalres' csv file")
+ parser.add_argument('--lock-file', default='./oe-build-perf.lock',
+ metavar='FILENAME', type=os.path.abspath,
+ help="Lock file to use")
+ parser.add_argument('-o', '--out-dir', default='results-{date}',
+ type=os.path.abspath,
+ help="Output directory for test results")
+ parser.add_argument('--log-file',
+ default='{out_dir}/oe-build-perf-test.log',
+ help="Log file of this script")
+ parser.add_argument('--run-tests', nargs='+', metavar='TEST',
+ help="List of tests to run")
+ parser.add_argument('--commit-results', metavar='GIT_DIR',
+ type=os.path.abspath,
+ help="Commit result data to a (local) git repository")
+ parser.add_argument('--commit-results-branch', metavar='BRANCH',
+ default="{git_branch}",
+ help="Commit results to branch BRANCH.")
+ parser.add_argument('--commit-results-tag', metavar='TAG',
+ default="{git_branch}/{git_commit_count}-g{git_commit}/{tag_num}",
+ help="Tag results commit with TAG.")
+
+ return parser.parse_args(argv)
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+
+ # Set-up log file
+ out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
+ setup_file_logging(args.log_file.format(out_dir=out_dir))
+
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ lock_f = open(args.lock_file, 'w')
+ if not acquire_lock(lock_f):
+ log.error("Another instance of this script is running, exiting...")
+ return 1
+
+ if not pre_run_sanity_check():
+ return 1
+ if args.commit_results:
+ if not init_git_repo(args.commit_results):
+ return 1
+
+ # Check our capability to drop caches and ask for a password if needed
+ KernelDropCaches.check()
+
+ # Load build perf tests
+ loader = BuildPerfTestLoader()
+ if args.run_tests:
+ suite = loader.loadTestsFromNames(args.run_tests, oeqa.buildperf)
+ else:
+ suite = loader.loadTestsFromModule(oeqa.buildperf)
+
+ archive_build_conf(out_dir)
+ runner = BuildPerfTestRunner(out_dir, verbosity=2)
+
+ # Suppress logger output to stderr so that the output from unittest
+ # is not mixed with occasional logger output
+ log.handlers[0].setLevel(logging.CRITICAL)
+
+ # Run actual tests
+ result = runner.run(suite)
+
+ # Restore logger output to stderr
+ log.handlers[0].setLevel(log.level)
+
+ if args.globalres_file:
+ result.update_globalres_file(args.globalres_file)
+ if args.commit_results:
+ result.git_commit_results(args.commit_results,
+ args.commit_results_branch,
+ args.commit_results_tag)
+ if result.wasSuccessful():
+ return 0
+
+ return 2
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
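The '{date}' and '{out_dir}' placeholders in the argument defaults above are
expanded with str.format() in main(); for example:

    from datetime import datetime

    out_dir = 'results-{date}'.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
    log_file = '{out_dir}/oe-build-perf-test.log'.format(out_dir=out_dir)
    # e.g. results-20160101120000/oe-build-perf-test.log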
diff --git a/import-layers/yocto-poky/scripts/oe-buildenv-internal b/import-layers/yocto-poky/scripts/oe-buildenv-internal
index e04db0398..9fae3b4ec 100755
--- a/import-layers/yocto-poky/scripts/oe-buildenv-internal
+++ b/import-layers/yocto-poky/scripts/oe-buildenv-internal
@@ -29,27 +29,31 @@ if [ -z "$OE_SKIP_SDK_CHECK" ] && [ -n "$OECORE_SDK_VERSION" ]; then
return 1
fi
-# Make sure we're not using python v3.x. This check can't go into
-# sanity.bbclass because bitbake's source code doesn't even pass
-# parsing stage when used with python v3, so we catch it here so we
-# can offer a meaningful error message.
-py_v3_check=$(/usr/bin/env python --version 2>&1 | grep "Python 3")
-if [ -n "$py_v3_check" ]; then
- echo >&2 "Bitbake is not compatible with python v3"
- echo >&2 "Please set up python v2 as your default python interpreter"
+# Make sure we're not using python v3.x as 'python'; we don't support it.
+py_v2_check=$(/usr/bin/env python --version 2>&1 | grep "Python 3")
+if [ -n "$py_v2_check" ]; then
+ echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
+ echo >&2 "Please set up python v2 as your default 'python' interpreter."
return 1
fi
-unset py_v3_check
-
-# Similarly, we now have code that doesn't parse correctly with older
-# versions of Python, and rather than fixing that and being eternally
-# vigilant for any other new feature use, just check the version here.
-py_v26_check=$(python -c 'import sys; print sys.version_info >= (2,7,3)')
-if [ "$py_v26_check" != "True" ]; then
- echo >&2 "BitBake requires Python 2.7.3 or later"
+unset py_v2_check
+
+py_v27_check=$(python -c 'import sys; print sys.version_info >= (2,7,3)')
+if [ "$py_v27_check" != "True" ]; then
+ echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
+ echo >&2 "Please upgrade your python v2."
+fi
+unset py_v27_check
+
+# We potentially have code that doesn't parse correctly with older versions
+# of Python, and rather than fixing that and being eternally vigilant for
+# any other new feature use, just check the version here.
+py_v34_check=$(python3 -c 'import sys; print(sys.version_info >= (3,4,0))')
+if [ "$py_v34_check" != "True" ]; then
+ echo >&2 "BitBake requires Python 3.4.0 or later as 'python3'"
return 1
fi
-unset py_v26_check
+unset py_v34_check
if [ -z "$BDIR" ]; then
if [ -z "$1" ]; then
@@ -92,7 +96,7 @@ BITBAKEDIR=$(readlink -f "$BITBAKEDIR")
BUILDDIR=$(readlink -f "$BUILDDIR")
if [ ! -d "$BITBAKEDIR" ]; then
- echo >&2 "Error: The bitbake directory ($BITBAKEDIR) does not exist! Please ensure a copy of bitbake exists at this location"
+ echo >&2 "Error: The bitbake directory ($BITBAKEDIR) does not exist! Please ensure a copy of bitbake exists at this location or specify an alternative path on the command line"
return 1
fi
@@ -114,7 +118,7 @@ BB_ENV_EXTRAWHITE_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
HTTPS_PROXY https_proxy FTP_PROXY ftp_proxy FTPS_PROXY ftps_proxy ALL_PROXY \
all_proxy NO_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY \
SDKMACHINE BB_NUMBER_THREADS BB_NO_NETWORK PARALLEL_MAKE GIT_PROXY_COMMAND \
-SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR"
+SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE"
BB_ENV_EXTRAWHITE="$(echo $BB_ENV_EXTRAWHITE $BB_ENV_EXTRAWHITE_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
diff --git a/import-layers/yocto-poky/scripts/oe-check-sstate b/import-layers/yocto-poky/scripts/oe-check-sstate
new file mode 100755
index 000000000..d06efe436
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/oe-check-sstate
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+
+# Query which tasks will be restored from sstate
+#
+# Copyright 2016 Intel Corporation
+# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import subprocess
+import tempfile
+import shutil
+import re
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+import argparse_oe
+
+
+def translate_virtualfns(tasks):
+ import bb.tinfoil
+ tinfoil = bb.tinfoil.Tinfoil()
+ try:
+ tinfoil.prepare(False)
+
+ recipecaches = tinfoil.cooker.recipecaches
+ outtasks = []
+ for task in tasks:
+ (mc, fn, taskname) = bb.runqueue.split_tid(task)
+ if taskname.endswith('_setscene'):
+ taskname = taskname[:-9]
+ outtasks.append('%s:%s' % (recipecaches[mc].pkg_fn[fn], taskname))
+ finally:
+ tinfoil.shutdown()
+ return outtasks
+
+
+def check(args):
+ tmpdir = tempfile.mkdtemp(prefix='oe-check-sstate-')
+ try:
+ env = os.environ.copy()
+ if not args.same_tmpdir:
+ env['BB_ENV_EXTRAWHITE'] = env.get('BB_ENV_EXTRAWHITE', '') + ' TMPDIR_forcevariable'
+ env['TMPDIR_forcevariable'] = tmpdir
+
+ try:
+ output = subprocess.check_output(
+ 'bitbake -n %s' % ' '.join(args.target),
+ stderr=subprocess.STDOUT,
+ env=env,
+ shell=True)
+
+ task_re = re.compile('NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
+ tasks = []
+ for line in output.decode('utf-8').splitlines():
+ res = task_re.match(line)
+ if res:
+ tasks.append(res.group(1))
+ outtasks = translate_virtualfns(tasks)
+ except subprocess.CalledProcessError as e:
+ print('ERROR: bitbake failed:\n%s' % e.output.decode('utf-8'))
+ return e.returncode
+ finally:
+ shutil.rmtree(tmpdir)
+
+ if args.log:
+ with open(args.log, 'wb') as f:
+ f.write(output)
+
+ if args.outfile:
+ with open(args.outfile, 'w') as f:
+ for task in outtasks:
+ f.write('%s\n' % task)
+ else:
+ for task in outtasks:
+ print(task)
+
+ return 0
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description='OpenEmbedded sstate check tool. Does a dry-run to check restoring the specified targets from shared state, and lists the tasks that would be restored. Set BB_SETSCENE_ENFORCE=1 in the environment if you wish to ensure real tasks are disallowed.')
+
+ parser.add_argument('target', nargs='+', help='Target to check')
+ parser.add_argument('-o', '--outfile', help='Write list to a file instead of stdout')
+ parser.add_argument('-l', '--log', help='Write full log to a file')
+ parser.add_argument('-s', '--same-tmpdir', action='store_true', help='Use same TMPDIR for check (list will then be dependent on what tasks have executed previously)')
+
+ parser.set_defaults(func=check)
+
+ args = parser.parse_args()
+
+ ret = args.func(args)
+ return ret
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
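check() scrapes setscene tasks out of 'bitbake -n' output using the regex shown
above. A self-contained illustration against a made-up NOTE line:

    import re

    task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
    sample = 'NOTE: Running setscene task 12 of 34 (/path/to/recipe.bb:do_populate_sysroot_setscene)'
    m = task_re.match(sample)
    if m:
        print(m.group(1))  # the task id captured inside the parentheses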
diff --git a/import-layers/yocto-poky/scripts/oe-git-proxy b/import-layers/yocto-poky/scripts/oe-git-proxy
index 124790240..0078e9545 100755
--- a/import-layers/yocto-poky/scripts/oe-git-proxy
+++ b/import-layers/yocto-poky/scripts/oe-git-proxy
@@ -86,13 +86,14 @@ match_host() {
# Match by netmask
if valid_ipv4 $GLOB; then
- HOST_IP=$(gethostip -d $HOST)
- if valid_ipv4 $HOST_IP; then
- match_ipv4 $GLOB $HOST_IP
- if [ $? -eq 0 ]; then
- return 0
+ for HOST_IP in $(getent ahostsv4 $HOST | grep ' STREAM ' | cut -d ' ' -f 1) ; do
+ if valid_ipv4 $HOST_IP; then
+ match_ipv4 $GLOB $HOST_IP
+ if [ $? -eq 0 ]; then
+ return 0
+ fi
fi
- fi
+ done
fi
return 1
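
The oe-git-proxy change above is shell, but conceptually it resolves every IPv4
address of HOST and tests each against the NO_PROXY netmask entry. A hedged
Python equivalent of that logic (host and network values are examples only):

    import ipaddress
    import socket

    def host_matches_network(host, network):
        net = ipaddress.ip_network(network)
        for info in socket.getaddrinfo(host, None, socket.AF_INET, socket.SOCK_STREAM):
            if ipaddress.ip_address(info[4][0]) in net:
                return True
        return False

    # host_matches_network('git.example.com', '192.168.42.0/24')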
diff --git a/import-layers/yocto-poky/scripts/oe-gnome-terminal-phonehome b/import-layers/yocto-poky/scripts/oe-gnome-terminal-phonehome
new file mode 100755
index 000000000..e02354883
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/oe-gnome-terminal-phonehome
@@ -0,0 +1,10 @@
+#!/bin/sh
+#
+# Gnome terminal won't tell us which PID a given command is run as,
+# or allow a single instance, so we can't tell when it completes.
+# This allows us to figure out the PID of the target so we can tell
+# when it's done.
+#
+echo $$ > $1
+shift
+exec $@
diff --git a/import-layers/yocto-poky/scripts/oe-pkgdata-util b/import-layers/yocto-poky/scripts/oe-pkgdata-util
index a04e44d35..bb917b4fc 100755
--- a/import-layers/yocto-poky/scripts/oe-pkgdata-util
+++ b/import-layers/yocto-poky/scripts/oe-pkgdata-util
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# OpenEmbedded pkgdata utility
#
@@ -240,7 +240,7 @@ def lookup_pkg(args):
sys.exit(1)
if args.reverse:
- items = mappings.values()
+ items = list(mappings.values())
else:
items = []
for pkg in pkgs:
@@ -274,6 +274,61 @@ def lookup_recipe(args):
items.extend(mappings.get(pkg, []))
print('\n'.join(items))
+def package_info(args):
+ # Handle both multiple arguments and multiple values within an arg (old syntax)
+ packages = []
+ if args.file:
+ with open(args.file, 'r') as f:
+ for line in f:
+ splitline = line.split()
+ if splitline:
+ packages.append(splitline[0])
+ else:
+ for pkgitem in args.pkg:
+ packages.extend(pkgitem.split())
+ if not packages:
+ logger.error("No packages specified")
+ sys.exit(1)
+
+ mappings = defaultdict(lambda: defaultdict(str))
+ for pkg in packages:
+ pkgfile = os.path.join(args.pkgdata_dir, 'runtime-reverse', pkg)
+ if os.path.exists(pkgfile):
+ with open(pkgfile, 'r') as f:
+ for line in f:
+ fields = line.rstrip().split(': ')
+ if fields[0].endswith("_" + pkg):
+ k = fields[0][:len(fields[0]) - len(pkg) - 1]
+ else:
+ k = fields[0]
+ v = fields[1] if len(fields) == 2 else ""
+ mappings[pkg][k] = v
+
+ if len(mappings) < len(packages):
+ missing = list(set(packages) - set(mappings.keys()))
+ logger.error("The following packages could not be found: %s" %
+ ', '.join(missing))
+ sys.exit(1)
+
+ items = []
+ for pkg in packages:
+ pkg_version = mappings[pkg]['PKGV']
+ if mappings[pkg]['PKGE']:
+ pkg_version = mappings[pkg]['PKGE'] + ":" + pkg_version
+ if mappings[pkg]['PKGR']:
+ pkg_version = pkg_version + "-" + mappings[pkg]['PKGR']
+ recipe = mappings[pkg]['PN']
+ recipe_version = mappings[pkg]['PV']
+ if mappings[pkg]['PE']:
+ recipe_version = mappings[pkg]['PE'] + ":" + recipe_version
+ if mappings[pkg]['PR']:
+ recipe_version = recipe_version + "-" + mappings[pkg]['PR']
+ pkg_size = mappings[pkg]['PKGSIZE']
+
+ items.append("%s %s %s %s %s" %
+ (pkg, pkg_version, recipe, recipe_version, pkg_size))
+ print('\n'.join(items))
+
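package_info() above parses pkgdata 'runtime-reverse' files whose lines have the
form 'FIELD: value', with per-package fields suffixed '_<pkg>'. A sketch of the
same parsing over made-up sample data:

    from collections import defaultdict

    pkg = 'libfoo1'  # hypothetical runtime package
    lines = ['PN: libfoo', 'PV: 1.0', 'PKGV: 1.0', 'PKGR: r0', 'PKGSIZE_libfoo1: 4096']
    fields = defaultdict(str)
    for line in lines:
        parts = line.rstrip().split(': ')
        key = parts[0]
        if key.endswith('_' + pkg):
            key = key[:-(len(pkg) + 1)]   # strip the '_<pkg>' suffix
        fields[key] = parts[1] if len(parts) == 2 else ''
    print(fields['PKGV'], fields['PKGSIZE'])  # -> 1.0 4096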
def get_recipe_pkgs(pkgdata_dir, recipe, unpackaged):
recipedatafile = os.path.join(pkgdata_dir, recipe)
if not os.path.exists(recipedatafile):
@@ -437,6 +492,7 @@ def main():
parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
parser.add_argument('-p', '--pkgdata-dir', help='Path to pkgdata directory (determined automatically if not specified)')
subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
parser_lookup_pkg = subparsers.add_parser('lookup-pkg',
help='Translate between recipe-space package names and runtime package names',
@@ -469,6 +525,13 @@ def main():
parser_lookup_recipe.add_argument('pkg', nargs='+', help='Runtime package name to look up')
parser_lookup_recipe.set_defaults(func=lookup_recipe)
+ parser_package_info = subparsers.add_parser('package-info',
+ help='Shows version, recipe and size information for one or more packages',
+ description='Looks up the specified runtime package(s) and displays information')
+ parser_package_info.add_argument('pkg', nargs='*', help='Runtime package name to look up')
+ parser_package_info.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
+ parser_package_info.set_defaults(func=package_info)
+
parser_find_path = subparsers.add_parser('find-path',
help='Find package providing a target path',
description='Finds the recipe-space package providing the specified target path')
@@ -506,7 +569,10 @@ def main():
sys.exit(1)
logger.debug('Found bitbake path: %s' % bitbakepath)
tinfoil = tinfoil_init()
- args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+ try:
+ args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+ finally:
+ tinfoil.shutdown()
logger.debug('Value of PKGDATA_DIR is "%s"' % args.pkgdata_dir)
if not args.pkgdata_dir:
logger.error('Unable to determine pkgdata directory from PKGDATA_DIR')
diff --git a/import-layers/yocto-poky/scripts/oe-publish-sdk b/import-layers/yocto-poky/scripts/oe-publish-sdk
index 55872f2f9..4fe8974de 100755
--- a/import-layers/yocto-poky/scripts/oe-publish-sdk
+++ b/import-layers/yocto-poky/scripts/oe-publish-sdk
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# OpenEmbedded SDK publishing tool
diff --git a/import-layers/yocto-poky/scripts/oe-run-native b/import-layers/yocto-poky/scripts/oe-run-native
new file mode 100755
index 000000000..496e34f70
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/oe-run-native
@@ -0,0 +1,48 @@
+#!/bin/sh
+#
+# Copyright (c) 2016, Intel Corporation.
+# All Rights Reserved
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+#
+
+#
+# This script is for running tools from native oe sysroot
+#
+
+if [ $# -lt 1 -o "$1" = '--help' -o "$1" = '-h' ] ; then
+ echo "Usage: $0 <native tool> [parameters]"
+ exit 1
+fi
+
+SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
+if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
+ echo "Error: Unable to find oe-find-native-sysroot script"
+ exit 1
+fi
+. $SYSROOT_SETUP_SCRIPT
+
+OLDPATH=$PATH
+
+# look for a tool only in native sysroot
+PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin
+tool=`/usr/bin/which $1 2>/dev/null`
+
+if [ -n "$tool" ] ; then
+ # add old path to allow usage of host tools
+ PATH=$PATH:$OLDPATH "$@"
+else
+ echo "Error: Unable to find '$1' in native sysroot"
+ exit 1
+fi
diff --git a/import-layers/yocto-poky/scripts/oe-selftest b/import-layers/yocto-poky/scripts/oe-selftest
index 5e23ef003..d9ffd40e8 100755
--- a/import-layers/yocto-poky/scripts/oe-selftest
+++ b/import-layers/yocto-poky/scripts/oe-selftest
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (c) 2013 Intel Corporation
#
@@ -34,6 +34,8 @@ import subprocess
import time as t
import re
import fnmatch
+import collections
+import imp
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
import scriptpath
@@ -46,8 +48,19 @@ import oeqa.utils.ftools as ftools
from oeqa.utils.commands import runCmd, get_bb_var, get_test_layer
from oeqa.selftest.base import oeSelfTest, get_available_machines
+try:
+ import xmlrunner
+ from xmlrunner.result import _XMLTestResult as TestResult
+ from xmlrunner import XMLTestRunner as _TestRunner
+except ImportError:
+ # use the base runner instead
+ from unittest import TextTestResult as TestResult
+ from unittest import TextTestRunner as _TestRunner
+
+log_prefix = "oe-selftest-" + t.strftime("%Y%m%d-%H%M%S")
+
def logger_create():
- log_file = "oe-selftest-" + t.strftime("%Y-%m-%d_%H:%M:%S") + ".log"
+ log_file = log_prefix + ".log"
if os.path.exists("oe-selftest.log"): os.remove("oe-selftest.log")
os.symlink(log_file, "oe-selftest.log")
@@ -211,7 +224,7 @@ def get_tests_from_module(tmod):
try:
import importlib
modlib = importlib.import_module(tmod)
- for mod in vars(modlib).values():
+ for mod in list(vars(modlib).values()):
if isinstance(mod, type(oeSelfTest)) and issubclass(mod, oeSelfTest) and mod is not oeSelfTest:
for test in dir(mod):
if test.startswith('test_') and hasattr(vars(mod)[test], '__call__'):
@@ -220,12 +233,12 @@ def get_tests_from_module(tmod):
try:
tid = vars(mod)[test].test_case
except:
- print 'DEBUG: tc id missing for ' + str(test)
+ print('DEBUG: tc id missing for ' + str(test))
tid = None
try:
ttag = vars(mod)[test].tag__feature
except:
- # print 'DEBUG: feature tag missing for ' + str(test)
+ # print('DEBUG: feature tag missing for ' + str(test))
ttag = None
# NOTE: for some reason lstrip() doesn't work for mod.__module__
@@ -260,16 +273,22 @@ def get_testsuite_by(criteria, keyword):
result = []
remaining = values[:]
for key in keyword:
+ found = False
if key in remaining:
# Regular matching of exact item
result.append(key)
remaining.remove(key)
+ found = True
else:
# Wildcard matching
pattern = re.compile(fnmatch.translate(r"%s" % key))
added = [x for x in remaining if pattern.match(x)]
- result.extend(added)
- remaining = [x for x in remaining if x not in added]
+ if added:
+ result.extend(added)
+ remaining = [x for x in remaining if x not in added]
+ found = True
+ if not found:
+ log.error("Failed to find test: %s" % key)
return result
@@ -320,17 +339,17 @@ def list_testsuite_by(criteria, keyword):
ts = sorted([ (tc.tcid, tc.tctag, tc.tcname, tc.tcclass, tc.tcmodule) for tc in get_testsuite_by(criteria, keyword) ])
- print '%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % ('id', 'tag', 'name', 'class', 'module')
- print '_' * 150
+ print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % ('id', 'tag', 'name', 'class', 'module'))
+ print('_' * 150)
for t in ts:
if isinstance(t[1], (tuple, list)):
- print '%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % (t[0], ', '.join(t[1]), t[2], t[3], t[4])
+ print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % (t[0], ', '.join(t[1]), t[2], t[3], t[4]))
else:
- print '%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % t
- print '_' * 150
- print 'Filtering by:\t %s' % criteria
- print 'Looking for:\t %s' % ', '.join(str(x) for x in keyword)
- print 'Total found:\t %s' % len(ts)
+ print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % t)
+ print('_' * 150)
+ print('Filtering by:\t %s' % criteria)
+ print('Looking for:\t %s' % ', '.join(str(x) for x in keyword))
+ print('Total found:\t %s' % len(ts))
def list_tests():
@@ -338,16 +357,15 @@ def list_tests():
ts = get_all_tests()
- print '%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % ('id', 'tag', 'name', 'class', 'module')
- print '_' * 150
+ print('%-4s\t%-10s\t%-50s' % ('id', 'tag', 'test'))
+ print('_' * 80)
for t in ts:
if isinstance(t.tctag, (tuple, list)):
- print '%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % (t.tcid, ', '.join(t.tctag), t.tcname, t.tcclass, t.tcmodule)
+ print('%-4s\t%-10s\t%-50s' % (t.tcid, ', '.join(t.tctag), '.'.join([t.tcmodule, t.tcclass, t.tcname])))
else:
- print '%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % (t.tcid, t.tctag, t.tcname, t.tcclass, t.tcmodule)
- print '_' * 150
- print 'Total found:\t %s' % len(ts)
-
+ print('%-4s\t%-10s\t%-50s' % (t.tcid, t.tctag, '.'.join([t.tcmodule, t.tcclass, t.tcname])))
+ print('_' * 80)
+ print('Total found:\t %s' % len(ts))
def list_tags():
# Get all tags set to test cases
@@ -362,7 +380,7 @@ def list_tags():
else:
tags.add(tc.tctag)
- print 'Tags:\t%s' % ', '.join(str(x) for x in tags)
+ print('Tags:\t%s' % ', '.join(str(x) for x in tags))
def coverage_setup(coverage_source, coverage_include, coverage_omit):
""" Set up the coverage measurement for the testcases to be run """
@@ -370,7 +388,7 @@ def coverage_setup(coverage_source, coverage_include, coverage_omit):
import subprocess
builddir = os.environ.get("BUILDDIR")
pokydir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- curcommit= subprocess.check_output(["git", "--git-dir", os.path.join(pokydir, ".git"), "rev-parse", "HEAD"])
+ curcommit = subprocess.check_output(["git", "--git-dir", os.path.join(pokydir, ".git"), "rev-parse", "HEAD"]).decode('utf-8')
coveragerc = "%s/.coveragerc" % builddir
data_file = "%s/.coverage." % builddir
data_file += datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
@@ -415,7 +433,7 @@ def coverage_report():
# Coverage under version 4 uses coverage.coverage
from coverage import coverage as Coverage
- import cStringIO as StringIO
+ import io as StringIO
from coverage.misc import CoverageException
cov_output = StringIO.StringIO()
@@ -443,22 +461,24 @@ def main():
bbpath = get_bb_var('BBPATH').split(':')
layer_libdirs = [p for p in (os.path.join(l, 'lib') for l in bbpath) if os.path.exists(p)]
sys.path.extend(layer_libdirs)
- reload(oeqa.selftest)
+ imp.reload(oeqa.selftest)
if args.run_tests_by and len(args.run_tests_by) >= 2:
valid_options = ['name', 'class', 'module', 'id', 'tag']
if args.run_tests_by[0] not in valid_options:
- print '--run-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.run_tests_by[0]
+ print('--run-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.run_tests_by[0])
return 1
else:
criteria = args.run_tests_by[0]
keyword = args.run_tests_by[1:]
ts = sorted([ tc.fullpath for tc in get_testsuite_by(criteria, keyword) ])
+ if not ts:
+ return 1
if args.list_tests_by and len(args.list_tests_by) >= 2:
valid_options = ['name', 'class', 'module', 'id', 'tag']
if args.list_tests_by[0] not in valid_options:
- print '--list-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.list_tests_by[0]
+ print('--list-tests-by %s not a valid option. Choose one of <name|class|module|id|tag>.' % args.list_tests_by[0])
return 1
else:
criteria = args.list_tests_by[0]
@@ -482,7 +502,7 @@ def main():
info = ''
if module.startswith('_'):
info = ' (hidden)'
- print module + info
+ print(module + info)
if args.list_allclasses:
try:
import importlib
@@ -490,13 +510,13 @@ def main():
for v in vars(modlib):
t = vars(modlib)[v]
if isinstance(t, type(oeSelfTest)) and issubclass(t, oeSelfTest) and t!=oeSelfTest:
- print " --", v
+ print(" --", v)
for method in dir(t):
- if method.startswith("test_") and callable(vars(t)[method]):
- print " -- --", method
+ if method.startswith("test_") and isinstance(vars(t)[method], collections.Callable):
+ print(" -- --", method)
except (AttributeError, ImportError) as e:
- print e
+ print(e)
pass
if args.run_tests or args.run_all_tests or args.run_tests_by:
@@ -511,7 +531,8 @@ def main():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
- runner = unittest.TextTestRunner(verbosity=2, resultclass=buildResultClass(args))
+ runner = TestRunner(verbosity=2,
+ resultclass=buildResultClass(args))
# we need to do this here, otherwise just loading the tests
# will take 2 minutes (bitbake -e calls)
oeSelfTest.testlayer_path = get_test_layer()
@@ -552,7 +573,7 @@ def buildResultClass(args):
"""Build a Result Class to use in the testcase execution"""
import site
- class StampedResult(unittest.TextTestResult):
+ class StampedResult(TestResult):
"""
Custom TestResult that prints the time when a test starts. As oe-selftest
can take a long time (ie a few hours) to run, timestamps help us understand
@@ -584,6 +605,10 @@ def buildResultClass(args):
if self.coverage_installed:
log.info("Coverage is enabled")
+ major_version = int(coverage.version.__version__[0])
+ if major_version < 4:
+ log.error("python coverage %s installed. Require version 4 or greater." % coverage.version.__version__)
+ self.stop()
# In case the user has not set the variable COVERAGE_PROCESS_START,
# create a default one and export it. The COVERAGE_PROCESS_START
# value indicates where the coverage configuration file resides
@@ -622,6 +647,21 @@ def buildResultClass(args):
return StampedResult
+class TestRunner(_TestRunner):
+ """Test runner class aware of exporting tests."""
+ def __init__(self, *args, **kwargs):
+ try:
+ exportdir = os.path.join(os.getcwd(), log_prefix)
+ kwargsx = dict(**kwargs)
+ # arguments specific to XMLTestRunner; if adding a new runner,
+ # also add logic to use that runner's args.
+ kwargsx['output'] = exportdir
+ kwargsx['descriptions'] = False
+ # covers the case where we tell the runner where to export results
+ super(TestRunner, self).__init__(*args, **kwargsx)
+ except TypeError:
+ log.info("test runner init'ed like unittest")
+ super(TestRunner, self).__init__(*args, **kwargs)
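
The TestRunner above probes for XMLTestRunner-specific keyword arguments and
falls back to the plain unittest runner on TypeError. The same pattern in
isolation (class and argument names are illustrative):

    def make_runner(runner_cls, **kwargs):
        try:
            # extra kwargs only XMLTestRunner understands
            return runner_cls(output='results', descriptions=False, **kwargs)
        except TypeError:
            # base TextTestRunner rejects 'output'; init it like unittest
            return runner_cls(**kwargs)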
if __name__ == "__main__":
try:
diff --git a/import-layers/yocto-poky/scripts/oe-trim-schemas b/import-layers/yocto-poky/scripts/oe-trim-schemas
index 29fb3a1b6..66a1b8d81 100755
--- a/import-layers/yocto-poky/scripts/oe-trim-schemas
+++ b/import-layers/yocto-poky/scripts/oe-trim-schemas
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
import sys
try:
diff --git a/import-layers/yocto-poky/scripts/oepydevshell-internal.py b/import-layers/yocto-poky/scripts/oepydevshell-internal.py
index f7b2e4e0b..a22bec336 100755
--- a/import-layers/yocto-poky/scripts/oepydevshell-internal.py
+++ b/import-layers/yocto-poky/scripts/oepydevshell-internal.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import sys
@@ -29,9 +29,6 @@ if len(sys.argv) != 3:
pty = open(sys.argv[1], "w+b", 0)
parent = int(sys.argv[2])
-# Don't buffer output by line endings
-sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
-sys.stdin = os.fdopen(sys.stdin.fileno(), 'r', 0)
nonblockingfd(pty)
nonblockingfd(sys.stdin)
@@ -50,7 +47,7 @@ try:
# Need cbreak/noecho whilst in select so we trigger on any keypress
cbreaknoecho(sys.stdin.fileno())
# Send our PID to the other end so they can kill us.
- pty.write(str(os.getpid()) + "\n")
+ pty.write(str(os.getpid()).encode('utf-8') + b"\n")
while True:
try:
writers = []
@@ -59,17 +56,18 @@ try:
(ready, _, _) = select.select([pty, sys.stdin], writers , [], 0)
try:
if pty in ready:
- i = i + pty.read()
+ i = i + pty.read().decode('utf-8')
if i:
# Write a page at a time to avoid overflowing output
# d.keys() is a good way to do that
sys.stdout.write(i[:4096])
+ sys.stdout.flush()
i = i[4096:]
if sys.stdin in ready:
echonocbreak(sys.stdin.fileno())
- o = raw_input()
+ o = input().encode('utf-8')
cbreaknoecho(sys.stdin.fileno())
- pty.write(o + "\n")
+ pty.write(o + b"\n")
except (IOError, OSError) as e:
if e.errno == 11:
continue
diff --git a/import-layers/yocto-poky/scripts/opkg-query-helper.py b/import-layers/yocto-poky/scripts/opkg-query-helper.py
index 2fb1a7897..ce89491f6 100755
--- a/import-layers/yocto-poky/scripts/opkg-query-helper.py
+++ b/import-layers/yocto-poky/scripts/opkg-query-helper.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# OpenEmbedded opkg query helper utility
#
diff --git a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/main.py.in b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/main.py.in
index 21bb0be3a..a954b125d 100644
--- a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/main.py.in
+++ b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/main.py.in
@@ -16,8 +16,6 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import print_function
-
import sys
import os
import optparse
@@ -149,9 +147,7 @@ def main(argv=None):
for time in res[4]:
if time is not None:
# output as ms
- print(time * 10, file=f)
- else:
- print(file=f)
+ f.write("%d\n" % (time * 10))
finally:
f.close()
filename = _get_filename(options.output)
diff --git a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py
index d423b9f77..a3a0b0b33 100644
--- a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -13,9 +13,6 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import with_statement
-
import os
import string
import re
diff --git a/import-layers/yocto-poky/scripts/pythondeps b/import-layers/yocto-poky/scripts/pythondeps
index ff92e747e..590b9769e 100755
--- a/import-layers/yocto-poky/scripts/pythondeps
+++ b/import-layers/yocto-poky/scripts/pythondeps
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Determine dependencies of python scripts or available python modules in a search path.
#
@@ -187,7 +187,7 @@ def get_depends_recursive(directory):
directory = os.path.realpath(directory)
provides = dict((v, k) for k, v in get_provides(directory))
- for filename, provide in provides.iteritems():
+ for filename, provide in provides.items():
if os.path.isdir(filename):
filename = os.path.join(filename, '__init__.py')
ispkg = True
diff --git a/import-layers/yocto-poky/scripts/recipetool b/import-layers/yocto-poky/scripts/recipetool
index 6c6648756..1052cd2b2 100755
--- a/import-layers/yocto-poky/scripts/recipetool
+++ b/import-layers/yocto-poky/scripts/recipetool
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Recipe creation tool
#
@@ -60,6 +60,7 @@ def main():
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='show this help message and exit')
subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
if global_args.debug:
logger.setLevel(logging.DEBUG)
@@ -76,37 +77,40 @@ def main():
scriptutils.logger_setup_color(logger, global_args.color)
tinfoil = tinfoil_init(False)
- for path in ([scripts_path] +
- tinfoil.config_data.getVar('BBPATH', True).split(':')):
- pluginpath = os.path.join(path, 'lib', 'recipetool')
- scriptutils.load_plugins(logger, plugins, pluginpath)
-
- registered = False
- for plugin in plugins:
- if hasattr(plugin, 'register_commands'):
- registered = True
- plugin.register_commands(subparsers)
- elif hasattr(plugin, 'register_command'):
- # Legacy function name
- registered = True
- plugin.register_command(subparsers)
- if hasattr(plugin, 'tinfoil_init'):
- plugin.tinfoil_init(tinfoil)
-
- if not registered:
- logger.error("No commands registered - missing plugins?")
- sys.exit(1)
-
- args = parser.parse_args(unparsed_args, namespace=global_args)
-
try:
- if getattr(args, 'parserecipes', False):
- tinfoil.config_data.disableTracking()
- tinfoil.parseRecipes()
- tinfoil.config_data.enableTracking()
- ret = args.func(args)
- except bb.BBHandledException:
- ret = 1
+ for path in ([scripts_path] +
+ tinfoil.config_data.getVar('BBPATH', True).split(':')):
+ pluginpath = os.path.join(path, 'lib', 'recipetool')
+ scriptutils.load_plugins(logger, plugins, pluginpath)
+
+ registered = False
+ for plugin in plugins:
+ if hasattr(plugin, 'register_commands'):
+ registered = True
+ plugin.register_commands(subparsers)
+ elif hasattr(plugin, 'register_command'):
+ # Legacy function name
+ registered = True
+ plugin.register_command(subparsers)
+ if hasattr(plugin, 'tinfoil_init'):
+ plugin.tinfoil_init(tinfoil)
+
+ if not registered:
+ logger.error("No commands registered - missing plugins?")
+ sys.exit(1)
+
+ args = parser.parse_args(unparsed_args, namespace=global_args)
+
+ try:
+ if getattr(args, 'parserecipes', False):
+ tinfoil.config_data.disableTracking()
+ tinfoil.parseRecipes()
+ tinfoil.config_data.enableTracking()
+ ret = args.func(args)
+ except bb.BBHandledException:
+ ret = 1
+ finally:
+ tinfoil.shutdown()
return ret
diff --git a/import-layers/yocto-poky/scripts/relocate_sdk.py b/import-layers/yocto-poky/scripts/relocate_sdk.py
index 99fca86a1..e47b4d916 100755
--- a/import-layers/yocto-poky/scripts/relocate_sdk.py
+++ b/import-layers/yocto-poky/scripts/relocate_sdk.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright (c) 2012 Intel Corporation
#
diff --git a/import-layers/yocto-poky/scripts/runqemu b/import-layers/yocto-poky/scripts/runqemu
index d7fa941a6..dbe17abfc 100755
--- a/import-layers/yocto-poky/scripts/runqemu
+++ b/import-layers/yocto-poky/scripts/runqemu
@@ -1,8 +1,9 @@
-#!/bin/bash
-#
+#!/usr/bin/env python3
+
# Handle running OE images standalone with QEMU
#
# Copyright (C) 2006-2011 Linux Foundation
+# Copyright (c) 2016 Wind River Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -17,530 +18,1010 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-usage() {
- MYNAME=`basename $0`
-cat <<_EOF
+import os
+import sys
+import logging
+import subprocess
+import re
+import fcntl
+import shutil
+import glob
+import configparser
+
+class OEPathError(Exception):
+ """Custom Exception to give better guidance on missing binaries"""
+ def __init__(self, message):
+ self.message = "In order for this script to dynamically infer paths\n \
+to kernels or filesystem images, you either need bitbake in your PATH\n \
+or to source oe-init-build-env before running this script.\n\n \
+Dynamic path inference can be avoided by passing a *.qemuboot.conf to\n \
+runqemu, i.e. `runqemu /path/to/my-image-name.qemuboot.conf`\n\n %s" % message
+
+
+def create_logger():
+ logger = logging.getLogger('runqemu')
+ logger.setLevel(logging.INFO)
+
+ # create console handler and set level to debug
+ ch = logging.StreamHandler()
+ ch.setLevel(logging.INFO)
+
+ # create formatter
+ formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
+
+ # add formatter to ch
+ ch.setFormatter(formatter)
+ # add ch to logger
+ logger.addHandler(ch)
+
+ return logger
+
+logger = create_logger()
+
+def print_usage():
+ print("""
Usage: you can run this script with any valid combination
of the following environment variables (in any order):
KERNEL - the kernel image file to use
ROOTFS - the rootfs image file or nfsroot directory to use
MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
Simplified QEMU command-line options can be passed with:
- nographic - disables video console
- serial - enables a serial console on /dev/ttyS0
- kvm - enables KVM when running qemux86/qemux86-64 (VT-capable CPU required)
- kvm-vhost - enables KVM with vhost support when running qemux86/qemux86-64 (VT-capable CPU required)
+ nographic - disable video console
+ serial - enable a serial console on /dev/ttyS0
+ slirp - enable user networking; no root privileges are required
+ kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
+ kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
- qemuparams="xyz" - specify custom parameters to QEMU
- bootparams="xyz" - specify custom kernel parameters during boot
+ audio - enable audio
+ tcpserial=<port> - specify tcp serial port number
+ biosdir=<dir> - specify custom bios dir
+ biosfilename=<filename> - specify bios filename
+ qemuparams=<xyz> - specify custom parameters to QEMU
+ bootparams=<xyz> - specify custom kernel parameters during boot
+ help - print this text
Examples:
- $MYNAME qemuarm
- $MYNAME qemux86-64 core-image-sato ext4
- $MYNAME qemux86-64 wic-image-minimal wic
- $MYNAME path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
- $MYNAME qemux86 iso/hddimg/vmdk/qcow2/vdi/ramfs/cpio.gz...
- $MYNAME qemux86 qemuparams="-m 256"
- $MYNAME qemux86 bootparams="psplash=false"
- $MYNAME path/to/<image>-<machine>.vmdk
- $MYNAME path/to/<image>-<machine>.wic
-_EOF
- exit 1
-}
-
-if [ "x$1" = "x" ]; then
- usage
-fi
-
-error() {
- echo "Error: "$*
- usage
-}
-
-MACHINE=${MACHINE:=""}
-KERNEL=${KERNEL:=""}
-ROOTFS=${ROOTFS:=""}
-FSTYPE=${FSTYPE:=""}
-LAZY_ROOTFS=""
-SCRIPT_QEMU_OPT=""
-SCRIPT_QEMU_EXTRA_OPT=""
-SCRIPT_KERNEL_OPT=""
-SERIALSTDIO=""
-TCPSERIAL_PORTNUM=""
-KVM_ENABLED="no"
-KVM_ACTIVE="no"
-VHOST_ENABLED="no"
-VHOST_ACTIVE="no"
-IS_VM="false"
-
-# Determine whether the file is a kernel or QEMU image, and set the
-# appropriate variables
-process_filename() {
- filename=$1
-
- # Extract the filename extension
- EXT=`echo $filename | awk -F . '{ print \$NF }'`
- case /$EXT/ in
- /bin/)
- # A file ending in .bin is a kernel
- [ -z "$KERNEL" ] && KERNEL=$filename || \
- error "conflicting KERNEL args [$KERNEL] and [$filename]"
- ;;
- /ext[234]/|/jffs2/|/btrfs/)
- # A file ending in a supportted fs type is a rootfs image
- if [ -z "$FSTYPE" -o "$FSTYPE" = "$EXT" ]; then
- FSTYPE=$EXT
- ROOTFS=$filename
- else
- error "conflicting FSTYPE types [$FSTYPE] and [$EXT]"
- fi
- ;;
- /hddimg/|/hdddirect/|/vmdk/|/wic/|/qcow2/|/vdi/)
- FSTYPE=$EXT
- VM=$filename
- ROOTFS=$filename
- IS_VM="true"
- ;;
- *)
- error "unknown file arg [$filename]"
- ;;
- esac
-}
-
-check_fstype_conflicts() {
- if [ -z "$FSTYPE" -o "$FSTYPE" = "$1" ]; then
- FSTYPE=$1
- else
- error "conflicting FSTYPE types [$FSTYPE] and [$1]"
- fi
-}
-# Parse command line args without requiring specific ordering. It's a
-# bit more complex, but offers a great user experience.
-while true; do
- arg=${1}
- case "$arg" in
- "qemux86" | "qemux86-64" | "qemuarm" | "qemuarm64" | "qemumips" | "qemumipsel" | \
- "qemumips64" | "qemush4" | "qemuppc" | "qemumicroblaze" | "qemuzynq")
- [ -z "$MACHINE" -o "$MACHINE" = "$arg" ] && MACHINE=$arg || \
- error "conflicting MACHINE types [$MACHINE] and [$arg]"
- ;;
- "ext"[234] | "jffs2" | "nfs" | "btrfs")
- check_fstype_conflicts $arg
- ;;
- "hddimg" | "hdddirect" | "wic" | "vmdk" | "qcow2" | "vdi" | "iso")
- check_fstype_conflicts $arg
- IS_VM="true"
- ;;
- "ramfs" | "cpio.gz")
- FSTYPE=cpio.gz
- ;;
- "nographic")
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -nographic"
- SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT console=ttyS0"
- ;;
- "serial")
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -serial stdio"
- SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT console=ttyS0"
- SERIALSTDIO="1"
- ;;
- "tcpserial="*)
- TCPSERIAL_PORTNUM=${arg##tcpserial=}
- ;;
- "biosdir="*)
- CUSTOMBIOSDIR="${arg##biosdir=}"
- ;;
- "biosfilename="*)
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -bios ${arg##biosfilename=}"
- ;;
- "qemuparams="*)
- SCRIPT_QEMU_EXTRA_OPT="${arg##qemuparams=}"
-
- # Warn user if they try to specify serial or kvm options
- # to use simplified options instead
- serial_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-serial\)'`
- kvm_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-enable-kvm\)'`
- vga_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-vga\)'`
- [ ! -z "$serial_option" -o ! -z "$kvm_option" ] && \
- echo "Please use simplified serial or kvm options instead"
- ;;
- "bootparams="*)
- SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT ${arg##bootparams=}"
- ;;
- "audio")
- if [ "x$MACHINE" = "xqemux86" -o "x$MACHINE" = "xqemux86-64" ]; then
- echo "Enabling audio in qemu."
- echo "Please install snd_intel8x0 or snd_ens1370 driver in linux guest."
- QEMU_AUDIO_DRV="alsa"
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -soundhw ac97,es1370"
- fi
- ;;
- "kvm")
- KVM_ENABLED="yes"
- KVM_CAPABLE=`grep -q 'vmx\|svm' /proc/cpuinfo && echo 1`
- ;;
- "kvm-vhost")
- KVM_ENABLED="yes"
- KVM_CAPABLE=`grep -q 'vmx\|svm' /proc/cpuinfo && echo 1`
- VHOST_ENABLED="yes"
- ;;
- "slirp")
- SLIRP_ENABLED="yes"
- ;;
- "publicvnc")
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -vnc :0"
- ;;
- *-image*)
- [ -z "$ROOTFS" ] || \
- error "conflicting ROOTFS args [$ROOTFS] and [$arg]"
- if [ -f "$arg" ]; then
- process_filename $arg
- elif [ -d "$arg" ]; then
- # Handle the case where the nfsroot dir has -image-
- # in the pathname
- echo "Assuming $arg is an nfs rootfs"
- FSTYPE=nfs
- ROOTFS=$arg
- else
- ROOTFS=$arg
- LAZY_ROOTFS="true"
- fi
- ;;
- "") break ;;
- *)
- # A directory name is an nfs rootfs
- if [ -d "$arg" ]; then
- echo "Assuming $arg is an nfs rootfs"
- if [ -z "$FSTYPE" -o "$FSTYPE" = "nfs" ]; then
- FSTYPE=nfs
- else
- error "conflicting FSTYPE types [$arg] and nfs"
- fi
-
- if [ -z "$ROOTFS" ]; then
- ROOTFS=$arg
- else
- error "conflicting ROOTFS args [$ROOTFS] and [$arg]"
- fi
- elif [ -f "$arg" ]; then
- process_filename $arg
- else
- error "unable to classify arg [$arg]"
- fi
- ;;
- esac
- shift
-done
-
-if [ ! -c /dev/net/tun ] ; then
- echo "TUN control device /dev/net/tun is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)"
- exit 1
-elif [ ! -w /dev/net/tun ] ; then
- echo "TUN control device /dev/net/tun is not writable, please fix (e.g. sudo chmod 666 /dev/net/tun)"
- exit 1
-fi
-
-# Report errors for missing combinations of options
-if [ -z "$MACHINE" -a -z "$KERNEL" -a -z "$VM" -a "$FSTYPE" != "wic" ]; then
- error "you must specify at least a MACHINE or KERNEL argument"
-fi
-if [ "$FSTYPE" = "nfs" -a -z "$ROOTFS" ]; then
- error "NFS booting without an explicit ROOTFS path is not yet supported"
-fi
-
-if [ -z "$MACHINE" ]; then
- if [ "$IS_VM" = "true" ]; then
- [ "x$FSTYPE" = "xwic" ] && filename=$ROOTFS || filename=$VM
- MACHINE=`basename $filename | sed -n 's/.*\(qemux86-64\|qemux86\|qemuarm64\|qemuarm\|qemumips64\|qemumips\|qemuppc\|qemush4\).*/\1/p'`
- if [ -z "$MACHINE" ]; then
- error "Unable to set MACHINE from image filename [$VM]"
- fi
- echo "Set MACHINE to [$MACHINE] based on image [$VM]"
- else
- MACHINE=`basename $KERNEL | sed -n 's/.*\(qemux86-64\|qemux86\|qemuarm64\|qemuarm\|qemumips64\|qemumips\|qemuppc\|qemush4\).*/\1/p'`
- if [ -z "$MACHINE" ]; then
- error "Unable to set MACHINE from kernel filename [$KERNEL]"
- fi
- echo "Set MACHINE to [$MACHINE] based on kernel [$KERNEL]"
- fi
-fi
-
-YOCTO_KVM_WIKI="https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
-YOCTO_PARAVIRT_KVM_WIKI="https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
-# Detect KVM configuration
-if [ "x$KVM_ENABLED" = "xyes" ]; then
- if [ -z "$KVM_CAPABLE" ]; then
- echo "You are trying to enable KVM on a cpu without VT support."
- echo "Remove kvm from the command-line, or refer"
- echo "$YOCTO_KVM_WIKI";
- exit 1;
- fi
- if [ "x$MACHINE" != "xqemux86" -a "x$MACHINE" != "xqemux86-64" ]; then
- echo "KVM only support x86 & x86-64. Remove kvm from the command-line";
- exit 1;
- fi
- if [ ! -e /dev/kvm ]; then
- echo "Missing KVM device. Have you inserted kvm modules?"
- echo "For further help see:"
- echo "$YOCTO_KVM_WIKI";
- exit 1;
- fi
- if [ -w /dev/kvm -a -r /dev/kvm ]; then
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -enable-kvm"
- KVM_ACTIVE="yes"
- else
- echo "You have no rights on /dev/kvm."
- echo "Please change the ownership of this file as described at:"
- echo "$YOCTO_KVM_WIKI";
- exit 1;
- fi
- if [ "x$VHOST_ENABLED" = "xyes" ]; then
- if [ ! -e /dev/vhost-net ]; then
- echo "Missing virtio net device. Have you inserted vhost-net module?"
- echo "For further help see:"
- echo "$YOCTO_PARAVIRT_KVM_WIKI";
- exit 1;
- fi
-
- if [ -w /dev/vhost-net -a -r /dev/vhost-net ]; then
- VHOST_ACTIVE="yes"
- else
- echo "You have no rights on /dev/vhost-net."
- echo "Please change the ownership of this file as described at:"
- echo "$YOCTO_KVM_WIKI";
- exit 1;
- fi
- fi
-fi
-
-machine2=`echo $MACHINE | tr 'a-z' 'A-Z' | sed 's/-/_/'`
-# MACHINE is now set for all cases
-
-# Defaults used when these vars need to be inferred
-QEMUX86_DEFAULT_KERNEL=bzImage-qemux86.bin
-QEMUX86_DEFAULT_FSTYPE=ext4
-
-QEMUX86_64_DEFAULT_KERNEL=bzImage-qemux86-64.bin
-QEMUX86_64_DEFAULT_FSTYPE=ext4
-
-QEMUARM_DEFAULT_KERNEL=zImage-qemuarm.bin
-QEMUARM_DEFAULT_FSTYPE=ext4
-
-QEMUARM64_DEFAULT_KERNEL=Image-qemuarm64.bin
-QEMUARM64_DEFAULT_FSTYPE=ext4
-
-QEMUMIPS_DEFAULT_KERNEL=vmlinux-qemumips.bin
-QEMUMIPS_DEFAULT_FSTYPE=ext4
-
-QEMUMIPSEL_DEFAULT_KERNEL=vmlinux-qemumipsel.bin
-QEMUMIPSEL_DEFAULT_FSTYPE=ext4
-
-QEMUMIPS64_DEFAULT_KERNEL=vmlinux-qemumips64.bin
-QEMUMIPS64_DEFAULT_FSTYPE=ext4
-
-QEMUSH4_DEFAULT_KERNEL=vmlinux-qemumips.bin
-QEMUSH4_DEFAULT_FSTYPE=ext4
-
-QEMUPPC_DEFAULT_KERNEL=vmlinux-qemuppc.bin
-QEMUPPC_DEFAULT_FSTYPE=ext4
-
-QEMUMICROBLAZE_DEFAULT_KERNEL=linux.bin.ub
-QEMUMICROBLAZE_DEFAULT_FSTYPE=cpio
-
-QEMUZYNQ_DEFAULT_KERNEL=uImage
-QEMUZYNQ_DEFAULT_FSTYPE=cpio
-
-setup_path_vars() {
- if [ -z "$OE_TMPDIR" ] ; then
- PATHS_REQUIRED=true
- elif [ "$1" = "1" -a -z "$DEPLOY_DIR_IMAGE" ] ; then
- PATHS_REQUIRED=true
- else
- PATHS_REQUIRED=false
- fi
-
- if [ "$PATHS_REQUIRED" = "true" ]; then
- # Try to get the variable values from bitbake
- type -P bitbake &>/dev/null || {
- echo "In order for this script to dynamically infer paths";
- echo "to kernels or filesystem images, you either need";
- echo "bitbake in your PATH or to source oe-init-build-env";
- echo "before running this script" >&2;
- exit 1; }
-
- # We have bitbake in PATH, get the variable values from bitbake
- BITBAKE_ENV_TMPFILE=`mktemp --tmpdir runqemu.XXXXXXXXXX`
- if [ "$?" != "0" ] ; then
- echo "Error: mktemp failed for bitbake environment output"
- exit 1
- fi
-
- MACHINE=$MACHINE bitbake -e > $BITBAKE_ENV_TMPFILE
- if [ -z "$OE_TMPDIR" ] ; then
- OE_TMPDIR=`sed -n 's/^TMPDIR=\"\(.*\)\"/\1/p' $BITBAKE_ENV_TMPFILE`
- fi
- if [ -z "$DEPLOY_DIR_IMAGE" ] ; then
- DEPLOY_DIR_IMAGE=`sed -n 's/^DEPLOY_DIR_IMAGE=\"\(.*\)\"/\1/p' $BITBAKE_ENV_TMPFILE`
- fi
- if [ -z "$OE_TMPDIR" ]; then
- # Check for errors from bitbake that the user needs to know about
- BITBAKE_OUTPUT=`cat $BITBAKE_ENV_TMPFILE | wc -l`
- if [ "$BITBAKE_OUTPUT" -eq "0" ]; then
- echo "Error: this script needs to be run from your build directory, or you need"
- echo "to explicitly set OE_TMPDIR and DEPLOY_DIR_IMAGE in your environment"
- else
- echo "There was an error running bitbake to determine TMPDIR"
- echo "Here is the output from 'bitbake -e':"
- cat $BITBAKE_ENV_TMPFILE
- fi
- rm $BITBAKE_ENV_TMPFILE
- exit 1
- fi
- rm $BITBAKE_ENV_TMPFILE
- fi
-}
-
-setup_sysroot() {
- # Toolchain installs set up $OECORE_NATIVE_SYSROOT in their
- # environment script. If that variable isn't set, we're
- # either in an in-tree build scenario or the environment
- # script wasn't source'd.
- if [ -z "$OECORE_NATIVE_SYSROOT" ]; then
- setup_path_vars
- BUILD_ARCH=`uname -m`
- BUILD_OS=`uname | tr '[A-Z]' '[a-z]'`
- BUILD_SYS="$BUILD_ARCH-$BUILD_OS"
-
- OECORE_NATIVE_SYSROOT=$OE_TMPDIR/sysroots/$BUILD_SYS
- fi
-
- # Some recipes store the BIOS under $OE_TMPDIR/sysroots/$MACHINE,
- # now defined as OECORE_MACHINE_SYSROOT. The latter is used when searching
- # BIOS, VGA BIOS and keymaps.
- if [ -z "$OECORE_MACHINE_SYSROOT" ]; then
- OECORE_MACHINE_SYSROOT=$OE_TMPDIR/sysroots/$MACHINE
- fi
-}
-
-# Locate a rootfs image to boot which matches our expected
-# machine and fstype.
-findimage() {
- where=$1
- machine=$2
- extension=$3
-
- # Sort rootfs candidates by modification time - the most
- # recently created one is the one we most likely want to boot.
- filename=`ls -t1 $where/*-image*$machine.$extension 2>/dev/null | head -n1`
- if [ "x$filename" != "x" ]; then
- ROOTFS=$filename
- return
- fi
-
- echo "Couldn't find a $machine rootfs image in $where."
- exit 1
-}
-
-if [ -e "$ROOTFS" -a -z "$FSTYPE" ]; then
- # Extract the filename extension
- EXT=`echo $ROOTFS | awk -F . '{ print \$NF }'`
- if [ "x$EXT" = "xext2" -o "x$EXT" = "xext3" -o \
- "x$EXT" = "xjffs2" -o "x$EXT" = "xbtrfs" -o \
- "x$EXT" = "xext4" ]; then
- FSTYPE=$EXT
- else
- echo "Note: Unable to determine filesystem extension for $ROOTFS"
- echo "We will use the default FSTYPE for $MACHINE"
- # ...which is done further below...
- fi
-fi
-
-if [ -z "$KERNEL" -a "$IS_VM" = "false" ]; then \
- setup_path_vars 1
- eval kernel_file=\$${machine2}_DEFAULT_KERNEL
- KERNEL=$DEPLOY_DIR_IMAGE/$kernel_file
-
- if [ -z "$KERNEL" ]; then
- error "Unable to determine default kernel for MACHINE [$MACHINE]"
- fi
-fi
-# KERNEL is now set for all cases
-
-if [ -z "$FSTYPE" ]; then
- eval FSTYPE=\$${machine2}_DEFAULT_FSTYPE
-
- if [ -z "$FSTYPE" ]; then
- error "Unable to determine default fstype for MACHINE [$MACHINE]"
- fi
-fi
-
-# FSTYPE is now set for all cases
-
-# Handle cases where a ROOTFS type is given instead of a filename, e.g.
-# core-image-sato
-if [ "$LAZY_ROOTFS" = "true" ]; then
- setup_path_vars 1
- echo "Assuming $ROOTFS really means $DEPLOY_DIR_IMAGE/$ROOTFS-$MACHINE.$FSTYPE"
- if [ "$IS_VM" = "true" ]; then
- VM=$DEPLOY_DIR_IMAGE/$ROOTFS-$MACHINE.$FSTYPE
- else
- ROOTFS=$DEPLOY_DIR_IMAGE/$ROOTFS-$MACHINE.$FSTYPE
- fi
-fi
-
-if [ -z "$ROOTFS" ]; then
- setup_path_vars 1
- T=$DEPLOY_DIR_IMAGE
- eval rootfs_list=\$${machine2}_DEFAULT_ROOTFS
- findimage $T $MACHINE $FSTYPE
-
- if [ -z "$ROOTFS" ]; then
- error "Unable to determine default rootfs for MACHINE [$MACHINE]"
- elif [ "$IS_VM" = "true" ]; then
- VM=$ROOTFS
- fi
-fi
-# ROOTFS is now set for all cases, now expand it to be an absolute path, it should exist at this point
-
-ROOTFS=`readlink -f $ROOTFS`
-
-echo ""
-echo "Continuing with the following parameters:"
-if [ "$IS_VM" = "false" ]; then
- echo "KERNEL: [$KERNEL]"
- echo "ROOTFS: [$ROOTFS]"
-else
- echo "VM: [$VM]"
-fi
-echo "FSTYPE: [$FSTYPE]"
-
-setup_sysroot
-# OECORE_NATIVE_SYSROOT and OECORE_MACHINE_SYSROOT are now set for all cases
-
-INTERNAL_SCRIPT="$0-internal"
-if [ ! -f "$INTERNAL_SCRIPT" -o ! -r "$INTERNAL_SCRIPT" ]; then
-INTERNAL_SCRIPT=`which runqemu-internal`
-fi
-
-# Specify directory for BIOS, VGA BIOS and keymaps
-if [ ! -z "$CUSTOMBIOSDIR" ]; then
- if [ -d "$OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR" ]; then
- echo "Assuming biosdir is $OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR"
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -L $OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR"
- elif [ -d "$OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR" ]; then
- echo "Assuming biosdir is $OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR"
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -L $OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR"
- else
- if [ ! -d "$CUSTOMBIOSDIR" ]; then
- echo "Custom BIOS directory not found. Tried: $CUSTOMBIOSDIR"
- echo "and $OECORE_NATIVE_SYSROOT/$CUSTOMBIOSDIR"
- echo "and $OECORE_MACHINE_SYSROOT/$CUSTOMBIOSDIR"
- exit 1;
- fi
- echo "Assuming biosdir is $CUSTOMBIOSDIR"
- SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -L $CUSTOMBIOSDIR"
- fi
-fi
-
-. $INTERNAL_SCRIPT
-exit $?
+ runqemu qemuarm
+ runqemu tmp/deploy/images/qemuarm
+ runqemu tmp/deploy/images/qemux86/.qemuboot.conf
+ runqemu qemux86-64 core-image-sato ext4
+ runqemu qemux86-64 wic-image-minimal wic
+ runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
+ runqemu qemux86 iso/hddimg/vmdk/qcow2/vdi/ramfs/cpio.gz...
+ runqemu qemux86 qemuparams="-m 256"
+ runqemu qemux86 bootparams="psplash=false"
+ runqemu path/to/<image>-<machine>.vmdk
+ runqemu path/to/<image>-<machine>.wic
+""")
+
+def check_tun():
+ """Check /dev/net/run"""
+ dev_tun = '/dev/net/tun'
+ if not os.path.exists(dev_tun):
+ raise Exception("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun)
+
+ if not os.access(dev_tun, os.W_OK):
+ raise Exception("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
+
+def check_libgl(qemu_bin):
+ cmd = 'ldd %s' % qemu_bin
+ logger.info('Running %s...' % cmd)
+ need_gl = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ if re.search('libGLU', need_gl):
+ # We can't run without a libGL.so
+ libgl = False
+ check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \
+ ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \
+ ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so'))
+
+ for (f1, f2) in check_files:
+ if re.search('\*', f1):
+ for g1 in glob.glob(f1):
+ if libgl:
+ break
+ if os.path.exists(g1):
+ for g2 in glob.glob(f2):
+ if os.path.exists(g2):
+ libgl = True
+ break
+ if libgl:
+ break
+ else:
+ if os.path.exists(f1) and os.path.exists(f2):
+ libgl = True
+ break
+ if not libgl:
+ logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.")
+ logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.")
+ logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.")
+ raise Exception('%s requires libGLU, but it was not found' % qemu_bin)
+
+def get_first_file(cmds):
+ """Return first file found in wildcard cmds"""
+ for cmd in cmds:
+ all_files = glob.glob(cmd)
+ if all_files:
+ for f in all_files:
+ if not os.path.isdir(f):
+ return f
+ return ''
+
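
A note on the helper above: get_first_file() returns the first regular file matching any of the glob patterns it is given, and '' when nothing matches; the callers below rely on that to resolve wildcard artefact names. A minimal usage sketch (the paths are hypothetical, for illustration only):

    # Hypothetical deploy-dir patterns
    cmds = ('tmp/deploy/images/qemux86/bzImage-qemux86.bin',
            'tmp/deploy/images/qemux86/bzImage*')
    kernel = get_first_file(cmds)   # '' if no match is found
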
+class BaseConfig(object):
+ def __init__(self):
+ # Vars can be merged with .qemuboot.conf, use a dict to manage them.
+ self.d = {
+ 'MACHINE': '',
+ 'DEPLOY_DIR_IMAGE': '',
+ 'QB_KERNEL_ROOT': '/dev/vda',
+ }
+
+ self.qemu_opt = ''
+ self.qemu_opt_script = ''
+ self.nfs_dir = ''
+ self.clean_nfs_dir = False
+ self.nfs_server = ''
+ self.rootfs = ''
+ self.qemuboot = ''
+ self.qbconfload = False
+ self.kernel = ''
+ self.kernel_cmdline = ''
+ self.kernel_cmdline_script = ''
+ self.dtb = ''
+ self.fstype = ''
+ self.kvm_enabled = False
+ self.vhost_enabled = False
+ self.slirp_enabled = False
+ self.nfs_instance = 0
+ self.nfs_running = False
+ self.serialstdio = False
+ self.cleantap = False
+ self.saved_stty = ''
+ self.audio_enabled = False
+ self.tcpserial_portnum = ''
+ self.custombiosdir = ''
+ self.lock = ''
+ self.lock_descriptor = ''
+ self.bitbake_e = ''
+ self.snapshot = False
+ self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs', 'cpio.gz', 'cpio', 'ramfs')
+ self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'vmdk', 'qcow2', 'vdi', 'iso')
+
+ def acquire_lock(self):
+ logger.info("Acquiring lockfile %s..." % self.lock)
+ try:
+ self.lock_descriptor = open(self.lock, 'w')
+ fcntl.flock(self.lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except Exception as e:
+ logger.info("Acquiring lockfile %s failed: %s" % (self.lock, e))
+ if self.lock_descriptor:
+ self.lock_descriptor.close()
+ return False
+ return True
+
+ def release_lock(self):
+ fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN)
+ self.lock_descriptor.close()
+ os.remove(self.lock)
+
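
The lock handling above uses fcntl.flock() with LOCK_EX|LOCK_NB, so a second runqemu instance fails fast instead of blocking on a tap device that is already in use. A self-contained sketch of the same pattern (the lock path is hypothetical):

    import fcntl

    def try_lock(path):
        # Take an exclusive, non-blocking lock; return the open
        # file object on success, None if someone else holds it.
        fd = open(path, 'w')
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return fd
        except OSError:
            fd.close()
            return None
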
+ def get(self, key):
+ if key in self.d:
+ return self.d.get(key)
+ else:
+ return ''
+
+ def set(self, key, value):
+ self.d[key] = value
+
+ def is_deploy_dir_image(self, p):
+ if os.path.isdir(p):
+ if not re.search('\.qemuboot\.conf$', '\n'.join(os.listdir(p)), re.M):
+ logger.info("Can't find required *.qemuboot.conf in %s" % p)
+ return False
+ if not re.search('-image-', '\n'.join(os.listdir(p))):
+ logger.info("Can't find *-image-* in %s" % p)
+ return False
+ return True
+ else:
+ return False
+
+ def check_arg_fstype(self, fst):
+ """Check and set FSTYPE"""
+ if fst not in self.fstypes + self.vmtypes:
+ logger.warn("Maybe unsupported FSTYPE: %s" % fst)
+ if not self.fstype or self.fstype == fst:
+ if fst == 'ramfs':
+ fst = 'cpio.gz'
+ self.fstype = fst
+ else:
+ raise Exception("Conflicting: FSTYPE %s and %s" % (self.fstype, fst))
+
+ def set_machine_deploy_dir(self, machine, deploy_dir_image):
+ """Set MACHINE and DEPLOY_DIR_IMAGE"""
+ logger.info('MACHINE: %s' % machine)
+ self.set("MACHINE", machine)
+ logger.info('DEPLOY_DIR_IMAGE: %s' % deploy_dir_image)
+ self.set("DEPLOY_DIR_IMAGE", deploy_dir_image)
+
+ def check_arg_nfs(self, p):
+ if os.path.isdir(p):
+ self.nfs_dir = p
+ else:
+ m = re.match('(.*):(.*)', p)
+ if not m:
+ raise Exception("Can't parse NFS arg: %s" % p)
+ self.nfs_server = m.group(1)
+ self.nfs_dir = m.group(2)
+ self.rootfs = ""
+ self.check_arg_fstype('nfs')
+
+ def check_arg_path(self, p):
+ """
+ - Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf
+ - Check whether it is a kernel file
+ - Check whether it is an image file
+ - Check whether it is an NFS dir
+ """
+ if p.endswith('.qemuboot.conf'):
+ self.qemuboot = p
+ self.qbconfload = True
+ elif re.search('\.bin$', p) or re.search('bzImage', p) or \
+ re.search('zImage', p) or re.search('vmlinux', p) or \
+ re.search('fitImage', p) or re.search('uImage', p):
+ self.kernel = p
+ elif os.path.exists(p) and (not os.path.isdir(p)) and re.search('-image-', os.path.basename(p)):
+ self.rootfs = p
+ dirpath = os.path.dirname(p)
+ m = re.search('(.*)\.(.*)$', p)
+ if m:
+ qb = '%s%s' % (re.sub('\.rootfs$', '', m.group(1)), '.qemuboot.conf')
+ if os.path.exists(qb):
+ self.qemuboot = qb
+ self.qbconfload = True
+ else:
+ logger.warn("%s doesn't exist" % qb)
+ fst = m.group(2)
+ self.check_arg_fstype(fst)
+ else:
+ raise Exception("Can't find FSTYPE from: %s" % p)
+ elif os.path.isdir(p) or (re.search(':', p) and re.search('/', p)):
+ if self.is_deploy_dir_image(p):
+ logger.info('DEPLOY_DIR_IMAGE: %s' % p)
+ self.set("DEPLOY_DIR_IMAGE", p)
+ else:
+ logger.info("Assuming %s is an nfs rootfs" % p)
+ self.check_arg_nfs(p)
+ else:
+ raise Exception("Unknown path arg %s" % p)
+
+ def check_arg_machine(self, arg):
+ """Check whether it is a machine"""
+ if (self.get('MACHINE') and self.get('MACHINE') != arg) or re.search('/', arg):
+ raise Exception("Unknown arg: %s" % arg)
+ elif self.get('MACHINE') == arg:
+ return
+ logger.info('Assuming MACHINE = %s' % arg)
+
+ # if we're running under testimage, or similarly as a child
+ # of an existing bitbake invocation, we can't invoke bitbake
+ # to validate the MACHINE setting and must assume it's correct...
+ # FIXME: testimage.bbclass exports these two variables into env,
+ # are there other scenarios in which we need to support being
+ # invoked by bitbake?
+ deploy = os.environ.get('DEPLOY_DIR_IMAGE')
+ bbchild = deploy and os.environ.get('OE_TMPDIR')
+ if bbchild:
+ self.set_machine_deploy_dir(arg, deploy)
+ return
+ # also check whether we're running under a sourced toolchain
+ # environment file
+ if os.environ.get('OECORE_NATIVE_SYSROOT'):
+ self.set("MACHINE", arg)
+ return
+
+ cmd = 'MACHINE=%s bitbake -e' % arg
+ logger.info('Running %s...' % cmd)
+ self.bitbake_e = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ # bitbake -e doesn't report invalid MACHINE as an error, so
+ # let's check DEPLOY_DIR_IMAGE to make sure that it is a valid
+ # MACHINE.
+ s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
+ if s:
+ deploy_dir_image = s.group(1)
+ else:
+ raise Exception("bitbake -e %s" % self.bitbake_e)
+ if self.is_deploy_dir_image(deploy_dir_image):
+ self.set_machine_deploy_dir(arg, deploy_dir_image)
+ else:
+ logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image)
+ self.set("MACHINE", arg)
+
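
The DEPLOY_DIR_IMAGE probe above works on the raw 'bitbake -e' dump, which prints every variable as NAME="value". A sketch of the extraction against assumed output (the path is illustrative):

    import re

    bitbake_e = 'DEPLOY_DIR_IMAGE="/builds/poky/tmp/deploy/images/qemux86-64"\n'
    s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', bitbake_e, re.M)
    if s:
        print(s.group(1))   # /builds/poky/tmp/deploy/images/qemux86-64
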
+ def check_args(self):
+ unknown_arg = ""
+ for arg in sys.argv[1:]:
+ if arg in self.fstypes + self.vmtypes:
+ self.check_arg_fstype(arg)
+ elif arg == 'nographic':
+ self.qemu_opt_script += ' -nographic'
+ self.kernel_cmdline_script += ' console=ttyS0'
+ elif arg == 'serial':
+ self.kernel_cmdline_script += ' console=ttyS0'
+ self.serialstdio = True
+ elif arg == 'audio':
+ logger.info("Enabling audio in qemu")
+ logger.info("Please install sound drivers in linux host")
+ self.audio_enabled = True
+ elif arg == 'kvm':
+ self.kvm_enabled = True
+ elif arg == 'kvm-vhost':
+ self.vhost_enabled = True
+ elif arg == 'slirp':
+ self.slirp_enabled = True
+ elif arg == 'snapshot':
+ self.snapshot = True
+ elif arg == 'publicvnc':
+ self.qemu_opt_script += ' -vnc :0'
+ elif arg.startswith('tcpserial='):
+ self.tcpserial_portnum = arg[len('tcpserial='):]
+ elif arg.startswith('biosdir='):
+ self.custombiosdir = arg[len('biosdir='):]
+ elif arg.startswith('biosfilename='):
+ self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):]
+ elif arg.startswith('qemuparams='):
+ self.qemu_opt_script += ' %s' % arg[len('qemuparams='):]
+ elif arg.startswith('bootparams='):
+ self.kernel_cmdline_script += ' %s' % arg[len('bootparams='):]
+ elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)):
+ self.check_arg_path(os.path.abspath(arg))
+ elif re.search('-image-', arg):
+ # Lazy rootfs
+ self.rootfs = arg
+ else:
+ # Finally, assume it is the MACHINE
+ if (not unknown_arg) or unknown_arg == arg:
+ unknown_arg = arg
+ else:
+ raise Exception("Can't handle two unknown args: %s %s" % (unknown_arg, arg))
+ # Check to make sure it is a valid machine
+ if unknown_arg:
+ if self.get('MACHINE') == unknown_arg:
+ return
+ if not self.get('DEPLOY_DIR_IMAGE'):
+ # Trying to get DEPLOY_DIR_IMAGE from env.
+ p = os.getenv('DEPLOY_DIR_IMAGE')
+ if p and self.is_deploy_dir_image(p):
+ machine = os.path.basename(p)
+ if unknown_arg == machine:
+ self.set_machine_deploy_dir(machine, p)
+ return
+ else:
+ logger.info('DEPLOY_DIR_IMAGE: %s' % p)
+ self.set("DEPLOY_DIR_IMAGE", p)
+ self.check_arg_machine(unknown_arg)
+
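
To make the dispatch above concrete, here is a hypothetical invocation traced through check_args() (the image and machine names are illustrative):

    # runqemu qemux86-64 core-image-sato ext4 'qemuparams=-m 512'
    # -> 'qemux86-64': not a known type or path, kept as the unknown arg
    # -> 'core-image-sato': matches '-image-', treated as a lazy rootfs
    # -> 'ext4': in self.fstypes, so check_arg_fstype('ext4')
    # -> 'qemuparams=-m 512': appended to qemu_opt_script
    # -> finally check_arg_machine('qemux86-64') resolves the MACHINE
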
+ def check_kvm(self):
+ """Check kvm and kvm-host"""
+ if not (self.kvm_enabled or self.vhost_enabled):
+ self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'))
+ return
+
+ if not self.get('QB_CPU_KVM'):
+ raise Exception("QB_CPU_KVM is NULL, this board doesn't support kvm")
+
+ self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'))
+ yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
+ yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
+ dev_kvm = '/dev/kvm'
+ dev_vhost = '/dev/vhost-net'
+ with open('/proc/cpuinfo', 'r') as f:
+ kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
+ if not kvm_cap:
+ logger.error("You are trying to enable KVM on a cpu without VT support.")
+ logger.error("Remove kvm from the command-line, or refer:")
+ raise Exception(yocto_kvm_wiki)
+
+ if not os.path.exists(dev_kvm):
+ logger.error("Missing KVM device. Have you inserted kvm modules?")
+ logger.error("For further help see:")
+ raise Exception(yocto_kvm_wiki)
+
+ if os.access(dev_kvm, os.W_OK|os.R_OK):
+ self.qemu_opt_script += ' -enable-kvm'
+ else:
+ logger.error("You have no read or write permission on /dev/kvm.")
+ logger.error("Please change the ownership of this file as described at:")
+ raise Exception(yocto_kvm_wiki)
+
+ if self.vhost_enabled:
+ if not os.path.exists(dev_vhost):
+ logger.error("Missing virtio net device. Have you inserted vhost-net module?")
+ logger.error("For further help see:")
+ raise Exception(yocto_paravirt_kvm_wiki)
+
+ if not os.access(dev_vhost, os.W_OK|os.R_OK):
+ logger.error("You have no read or write permission on /dev/vhost-net.")
+ logger.error("Please change the ownership of this file as described at:")
+ raise Exception(yocto_paravirt_kvm_wiki)
+
+ def check_fstype(self):
+ """Check and setup FSTYPE"""
+ if not self.fstype:
+ fstype = self.get('QB_DEFAULT_FSTYPE')
+ if fstype:
+ self.fstype = fstype
+ else:
+ raise Exception("FSTYPE is NULL!")
+
+ def check_rootfs(self):
+ """Check and set rootfs"""
+
+ if self.fstype == 'nfs':
+ return
+
+ if self.rootfs and not os.path.exists(self.rootfs):
+ # Lazy rootfs
+ self.rootfs = "%s/%s-%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
+ self.rootfs, self.get('MACHINE'),
+ self.fstype)
+ elif not self.rootfs:
+ cmd_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
+ cmd_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
+ cmds = (cmd_name, cmd_link)
+ self.rootfs = get_first_file(cmds)
+ if not self.rootfs:
+ raise Exception("Failed to find rootfs: %s or %s" % cmds)
+
+ if not os.path.exists(self.rootfs):
+ raise Exception("Can't find rootfs: %s" % self.rootfs)
+
+ def check_kernel(self):
+ """Check and set kernel, dtb"""
+ # The vm image doesn't need a kernel
+ if self.fstype in self.vmtypes:
+ return
+
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ if not self.kernel:
+ kernel_match_name = "%s/%s" % (deploy_dir_image, self.get('QB_DEFAULT_KERNEL'))
+ kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
+ kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
+ cmds = (kernel_match_name, kernel_match_link, kernel_startswith)
+ self.kernel = get_first_file(cmds)
+ if not self.kernel:
+ raise Exception('KERNEL not found: %s, %s or %s' % cmds)
+
+ if not os.path.exists(self.kernel):
+ raise Exception("KERNEL %s not found" % self.kernel)
+
+ dtb = self.get('QB_DTB')
+ if dtb:
+ cmd_match = "%s/%s" % (deploy_dir_image, dtb)
+ cmd_startswith = "%s/%s*" % (deploy_dir_image, dtb)
+ cmd_wild = "%s/*.dtb" % deploy_dir_image
+ cmds = (cmd_match, cmd_startswith, cmd_wild)
+ self.dtb = get_first_file(cmds)
+ if not os.path.exists(self.dtb):
+ raise Exception('DTB not found: %s, %s or %s' % cmds)
+
+ def check_biosdir(self):
+ """Check custombiosdir"""
+ if not self.custombiosdir:
+ return
+
+ biosdir = ""
+ biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir)
+ biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir)
+ for i in (self.custombiosdir, biosdir_native, biosdir_host):
+ if os.path.isdir(i):
+ biosdir = i
+ break
+
+ if biosdir:
+ logger.info("Assuming biosdir is: %s" % biosdir)
+ self.qemu_opt_script += ' -L %s' % biosdir
+ else:
+ logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host))
+ raise Exception("Invalid custombiosdir: %s" % self.custombiosdir)
+
+ def check_mem(self):
+ s = re.search('-m +([0-9]+)', self.qemu_opt_script)
+ if s:
+ self.set('QB_MEM', '-m %s' % s.group(1))
+ elif not self.get('QB_MEM'):
+ logger.info('QB_MEM is not set, using 512M by default')
+ self.set('QB_MEM', '-m 512')
+
+ mem_size = self.get('QB_MEM').replace('-m', '').strip()
+ self.kernel_cmdline_script += ' mem=%sM' % mem_size
+ self.qemu_opt_script += ' %s' % self.get('QB_MEM')
+
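
check_mem() lets an explicit qemuparams="-m N" win over QB_MEM and then mirrors the size into the kernel command line. Tracing the regex with an assumed option string:

    import re

    qemu_opt_script = ' -m 256 -nographic'          # e.g. from qemuparams="-m 256"
    s = re.search('-m +([0-9]+)', qemu_opt_script)
    qb_mem = '-m %s' % s.group(1)                   # '-m 256'
    mem_arg = 'mem=%sM' % qb_mem.replace('-m', '').strip()
    print(mem_arg)                                  # prints: mem=256M
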
+ def check_tcpserial(self):
+ if self.tcpserial_portnum:
+ if self.get('QB_TCPSERIAL_OPT'):
+ self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum)
+ else:
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum
+
+ def check_and_set(self):
+ """Check configs sanity and set when needed"""
+ self.validate_paths()
+ check_tun()
+ # Check audio
+ if self.audio_enabled:
+ if not self.get('QB_AUDIO_DRV'):
+ raise Exception("QB_AUDIO_DRV is NULL, this board doesn't support audio")
+ if not self.get('QB_AUDIO_OPT'):
+ logger.warn('QB_AUDIO_OPT is NULL, you may need to define it to make audio work')
+ else:
+ self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT')
+ os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV'))
+ else:
+ os.putenv('QEMU_AUDIO_DRV', 'none')
+
+ self.check_kvm()
+ self.check_fstype()
+ self.check_rootfs()
+ self.check_kernel()
+ self.check_biosdir()
+ self.check_mem()
+ self.check_tcpserial()
+
+ def read_qemuboot(self):
+ if not self.qemuboot:
+ if self.get('DEPLOY_DIR_IMAGE'):
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ elif os.getenv('DEPLOY_DIR_IMAGE'):
+ deploy_dir_image = os.getenv('DEPLOY_DIR_IMAGE')
+ else:
+ logger.info("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
+ return
+
+ if self.rootfs and not os.path.exists(self.rootfs):
+ # Lazy rootfs
+ machine = self.get('MACHINE')
+ if not machine:
+ machine = os.path.basename(deploy_dir_image)
+ self.qemuboot = "%s/%s-%s.qemuboot.conf" % (deploy_dir_image,
+ self.rootfs, machine)
+ else:
+ cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image
+ logger.info('Running %s...' % cmd)
+ qbs = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ if qbs:
+ self.qemuboot = qbs.split()[0]
+ self.qbconfload = True
+
+ if not self.qemuboot:
+ # If we haven't found a .qemuboot.conf at this point it probably
+ # doesn't exist, continue without
+ return
+
+ if not os.path.exists(self.qemuboot):
+ raise Exception("Failed to find <image>.qemuboot.conf!")
+
+ logger.info('CONFFILE: %s' % self.qemuboot)
+
+ cf = configparser.ConfigParser()
+ cf.read(self.qemuboot)
+ for k, v in cf.items('config_bsp'):
+ k_upper = k.upper()
+ self.set(k_upper, v)
+
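
For reference, read_qemuboot() expects an INI-style file with a [config_bsp] section and upper-cases every key before storing it. A sketch with a hand-written sample (the values are illustrative, not taken from a real build):

    import configparser

    sample = '''
    [config_bsp]
    qb_system_name = qemu-system-i386
    qb_mem = -m 256
    qb_default_fstype = ext4
    '''
    cf = configparser.ConfigParser()
    cf.read_string(sample)
    for k, v in cf.items('config_bsp'):
        print(k.upper(), '=', v)    # QB_SYSTEM_NAME = qemu-system-i386, ...
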
+ def validate_paths(self):
+ """Ensure all relevant path variables are set"""
+ # When we're started with a *.qemuboot.conf arg assume that image
+ # artefacts are relative to that file, rather than in whatever
+ # directory DEPLOY_DIR_IMAGE in the conf file points to.
+ if self.qbconfload:
+ imgdir = os.path.dirname(self.qemuboot)
+ if imgdir != self.get('DEPLOY_DIR_IMAGE'):
+ logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir))
+ self.set('DEPLOY_DIR_IMAGE', imgdir)
+
+ # If the STAGING_*_NATIVE directories from the config file don't exist
+ # and we're in a sourced OE build directory try to extract the paths
+ # from `bitbake -e`
+ havenative = os.path.exists(self.get('STAGING_DIR_NATIVE')) and \
+ os.path.exists(self.get('STAGING_BINDIR_NATIVE'))
+
+ if not havenative:
+ if not self.bitbake_e:
+ self.load_bitbake_env()
+
+ if self.bitbake_e:
+ native_vars = ['STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE']
+ for nv in native_vars:
+ s = re.search('^%s="(.*)"' % nv, self.bitbake_e, re.M)
+ if s and s.group(1) != self.get(nv):
+ logger.info('Overriding conf file setting of %s to %s from Bitbake environment' % (nv, s.group(1)))
+ self.set(nv, s.group(1))
+ else:
+ # when we're invoked from a running bitbake instance we won't
+ # be able to call `bitbake -e`, then try:
+ # - get OE_TMPDIR from environment and guess paths based on it
+ # - get OECORE_NATIVE_SYSROOT from environment (for sdk)
+ tmpdir = os.environ.get('OE_TMPDIR', None)
+ oecore_native_sysroot = os.environ.get('OECORE_NATIVE_SYSROOT', None)
+ if tmpdir:
+ logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir)
+ hostos, _, _, _, machine = os.uname()
+ buildsys = '%s-%s' % (machine, hostos.lower())
+ staging_dir_native = '%s/sysroots/%s' % (tmpdir, buildsys)
+ self.set('STAGING_DIR_NATIVE', staging_dir_native)
+ elif oecore_native_sysroot:
+ logger.info('Setting STAGING_DIR_NATIVE to OECORE_NATIVE_SYSROOT (%s)' % oecore_native_sysroot)
+ self.set('STAGING_DIR_NATIVE', oecore_native_sysroot)
+ if self.get('STAGING_DIR_NATIVE'):
+ # we have to assume that STAGING_BINDIR_NATIVE is at usr/bin
+ staging_bindir_native = '%s/usr/bin' % self.get('STAGING_DIR_NATIVE')
+ logger.info('Setting STAGING_BINDIR_NATIVE to %s' % staging_bindir_native)
+ self.set('STAGING_BINDIR_NATIVE', staging_bindir_native)
+
+ def print_config(self):
+ logger.info('Continuing with the following parameters:\n')
+ if self.fstype not in self.vmtypes:
+ print('KERNEL: [%s]' % self.kernel)
+ if self.dtb:
+ print('DTB: [%s]' % self.dtb)
+ print('MACHINE: [%s]' % self.get('MACHINE'))
+ print('FSTYPE: [%s]' % self.fstype)
+ if self.fstype == 'nfs':
+ print('NFS_DIR: [%s]' % self.nfs_dir)
+ else:
+ print('ROOTFS: [%s]' % self.rootfs)
+ print('CONFFILE: [%s]' % self.qemuboot)
+ print('')
+
+ def setup_nfs(self):
+ if not self.nfs_server:
+ if self.slirp_enabled:
+ self.nfs_server = '10.0.2.2'
+ else:
+ self.nfs_server = '192.168.7.1'
+
+ nfs_instance = int(self.nfs_instance)
+
+ mountd_rpcport = 21111 + nfs_instance
+ nfsd_rpcport = 11111 + nfs_instance
+ nfsd_port = 3049 + 2 * nfs_instance
+ mountd_port = 3048 + 2 * nfs_instance
+ unfs_opts="nfsvers=3,port=%s,mountprog=%s,nfsprog=%s,udp,mountport=%s" % (nfsd_port, mountd_rpcport, nfsd_rpcport, mountd_port)
+ self.unfs_opts = unfs_opts
+
+ p = '%s/.runqemu-sdk/pseudo' % os.getenv('HOME')
+ os.putenv('PSEUDO_LOCALSTATEDIR', p)
+
+ # Extract the rootfs tarball (.tar.bz2 or .tar.gz) if self.nfs_dir is not set
+ if not self.nfs_dir:
+ src_prefix = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'))
+ dest = "%s-nfsroot" % src_prefix
+ if os.path.exists('%s.pseudo_state' % dest):
+ logger.info('Using %s as NFS_DIR' % dest)
+ self.nfs_dir = dest
+ else:
+ src = ""
+ src1 = '%s.tar.bz2' % src_prefix
+ src2 = '%s.tar.gz' % src_prefix
+ if os.path.exists(src1):
+ src = src1
+ elif os.path.exists(src2):
+ src = src2
+ if not src:
+ raise Exception("No NFS_DIR is set, and can't find %s or %s to extract" % (src1, src2))
+ logger.info('NFS_DIR not found, extracting %s to %s' % (src, dest))
+ cmd = 'runqemu-extract-sdk %s %s' % (src, dest)
+ logger.info('Running %s...' % cmd)
+ if subprocess.call(cmd, shell=True) != 0:
+ raise Exception('Failed to run %s' % cmd)
+ self.clean_nfs_dir = True
+ self.nfs_dir = dest
+
+ # Start the userspace NFS server
+ cmd = 'runqemu-export-rootfs restart %s' % self.nfs_dir
+ logger.info('Running %s...' % cmd)
+ if subprocess.call(cmd, shell=True) != 0:
+ raise Exception('Failed to run %s' % cmd)
+
+ self.nfs_running = True
+
+
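
Each NFS instance gets its own port set so that parallel runqemu sessions don't collide: instance N uses nfsd port 3049+2N, mountd port 3048+2N, and RPC program numbers 11111+N / 21111+N. A quick check of the arithmetic used above:

    for n in (0, 1, 2):
        print('instance %d: nfsd=%d mountd=%d nfsprog=%d mountprog=%d'
              % (n, 3049 + 2 * n, 3048 + 2 * n, 11111 + n, 21111 + n))
    # instance 0: nfsd=3049 mountd=3048 nfsprog=11111 mountprog=21111
    # instance 1: nfsd=3051 mountd=3050 nfsprog=11112 mountprog=21112
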
+ def setup_slirp(self):
+ if self.fstype == 'nfs':
+ self.setup_nfs()
+ self.kernel_cmdline_script += ' ip=dhcp'
+ self.set('NETWORK_CMD', self.get('QB_SLIRP_OPT'))
+
+ def setup_tap(self):
+ """Setup tap"""
+
+ # This file is created when runqemu-gen-tapdevs creates a bank of tap
+ # devices, indicating that the user should not bring up new ones using
+ # sudo.
+ nosudo_flag = '/etc/runqemu-nosudo'
+ self.qemuifup = shutil.which('runqemu-ifup')
+ self.qemuifdown = shutil.which('runqemu-ifdown')
+ ip = shutil.which('ip')
+ lockdir = "/tmp/qemu-tap-locks"
+
+ if not (self.qemuifup and self.qemuifdown and ip):
+ raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
+
+ if not os.path.exists(lockdir):
+ # There might be a race when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ except FileExistsError:
+ pass
+
+ cmd = '%s link' % ip
+ logger.info('Running %s...' % cmd)
+ ip_link = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ # Matches lines like: 6: tap0: <foo>
+ possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
+ tap = ""
+ for p in possibles:
+ lockfile = os.path.join(lockdir, p)
+ if os.path.exists('%s.skip' % lockfile):
+ logger.info('Found %s.skip, skipping %s' % (lockfile, p))
+ continue
+ self.lock = lockfile + '.lock'
+ if self.acquire_lock():
+ tap = p
+ logger.info("Using preconfigured tap device %s" % tap)
+ logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap))
+ break
+
+ if not tap:
+ if os.path.exists(nosudo_flag):
+ logger.error("Error: There are no available tap devices to use for networking,")
+ logger.error("and %s exists, so a new one will not be created via sudo." % nosudo_flag)
+ raise Exception("No available tap devices and %s exists" % nosudo_flag)
+
+ gid = os.getgid()
+ uid = os.getuid()
+ logger.info("Setting up tap interface under sudo")
+ cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.get('STAGING_DIR_NATIVE'))
+ tap = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8').rstrip('\n')
+ lockfile = os.path.join(lockdir, tap)
+ self.lock = lockfile + '.lock'
+ self.acquire_lock()
+ self.cleantap = True
+ logger.info('Created tap: %s' % tap)
+
+ if not tap:
+ logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
+ return 1
+ self.tap = tap
+ n0 = tap[3:]
+ n1 = int(n0) * 2 + 1
+ n2 = n1 + 1
+ self.nfs_instance = n0
+ if self.fstype == 'nfs':
+ self.setup_nfs()
+ self.kernel_cmdline_script += " ip=192.168.7.%s::192.168.7.%s:255.255.255.0" % (n2, n1)
+ mac = "52:54:00:12:34:%02x" % n2
+ qb_tap_opt = self.get('QB_TAP_OPT')
+ if qb_tap_opt:
+ qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap).replace('@MAC@', mac)
+ else:
+ qemu_tap_opt = "-device virtio-net-pci,netdev=net0,mac=%s -netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (mac, self.tap)
+
+ if self.vhost_enabled:
+ qemu_tap_opt += ',vhost=on'
+
+ self.set('NETWORK_CMD', qemu_tap_opt)
+
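
The guest/host addressing is derived purely from the tap index: tap N yields host-side 192.168.7.(2N+1) and guest-side 192.168.7.(2N+2), so every tap device gets a distinct address pair within 192.168.7.0/24. A quick check of the arithmetic used above:

    for tap in ('tap0', 'tap1', 'tap2'):
        n0 = int(tap[3:])
        n1 = n0 * 2 + 1      # host-side address suffix
        n2 = n1 + 1          # guest-side address suffix
        print('%s: guest 192.168.7.%d, host 192.168.7.%d' % (tap, n2, n1))
    # tap0: guest 192.168.7.2, host 192.168.7.1
    # tap1: guest 192.168.7.4, host 192.168.7.3
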
+ def setup_network(self):
+ cmd = "stty -g"
+ self.saved_stty = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ if self.slirp_enabled:
+ self.setup_slirp()
+ else:
+ self.setup_tap()
+
+ rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw'
+
+ qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
+ if qb_rootfs_opt:
+ self.rootfs_options = qb_rootfs_opt.replace('@ROOTFS@', self.rootfs)
+ else:
+ self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format)
+
+ if self.fstype in ('cpio.gz', 'cpio'):
+ self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
+ self.rootfs_options = '-initrd %s' % self.rootfs
+ else:
+ if self.fstype in self.vmtypes:
+ if self.fstype == 'iso':
+ vm_drive = '-cdrom %s' % self.rootfs
+ else:
+ cmd1 = "grep -q 'root=/dev/sd' %s" % self.rootfs
+ cmd2 = "grep -q 'root=/dev/hd' %s" % self.rootfs
+ if subprocess.call(cmd1, shell=True) == 0:
+ logger.info('Using scsi drive')
+ vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \
+ % (self.rootfs, rootfs_format)
+ elif subprocess.call(cmd2, shell=True) == 0:
+ logger.info('Using ide drive')
+ vm_drive = "%s,format=%s" % (self.rootfs, rootfs_format)
+ else:
+ logger.warn("Can't detect drive type %s" % self.rootfs)
+ logger.warn('Tring to use virtio block drive')
+ vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format)
+ self.rootfs_options = '%s -no-reboot' % vm_drive
+ self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT'))
+
+ if self.fstype == 'nfs':
+ self.rootfs_options = ''
+ k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, self.nfs_dir, self.unfs_opts)
+ self.kernel_cmdline = 'root=%s rw highres=off' % k_root
+
+ self.set('ROOTFS_OPTIONS', self.rootfs_options)
+
+ def guess_qb_system(self):
+ """attempt to determine the appropriate qemu-system binary"""
+ mach = self.get('MACHINE')
+ if not mach:
+ search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
+ if self.rootfs:
+ match = re.match(search, self.rootfs)
+ if match:
+ mach = match.group(1)
+ elif self.kernel:
+ match = re.match(search, self.kernel)
+ if match:
+ mach = match.group(1)
+
+ if not mach:
+ return None
+
+ if mach == 'qemuarm':
+ qbsys = 'arm'
+ elif mach == 'qemuarm64':
+ qbsys = 'aarch64'
+ elif mach == 'qemux86':
+ qbsys = 'i386'
+ elif mach == 'qemux86-64':
+ qbsys = 'x86_64'
+ elif mach == 'qemuppc':
+ qbsys = 'ppc'
+ elif mach == 'qemumips':
+ qbsys = 'mips'
+ elif mach == 'qemumips64':
+ qbsys = 'mips64'
+ elif mach == 'qemumipsel':
+ qbsys = 'mipsel'
+ elif mach == 'qemumips64el':
+ qbsys = 'mips64el'
+
+ return 'qemu-system-%s' % qbsys
+
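
The machine-to-binary mapping above is a plain if/elif ladder; the same table reads more compactly as a dict lookup. This is only a sketch of an alternative, not the script's actual code:

    QEMU_SYSTEMS = {
        'qemuarm': 'arm', 'qemuarm64': 'aarch64',
        'qemux86': 'i386', 'qemux86-64': 'x86_64',
        'qemuppc': 'ppc', 'qemumips': 'mips',
        'qemumips64': 'mips64', 'qemumipsel': 'mipsel',
        'qemumips64el': 'mips64el',
    }

    def qemu_system_for(mach):
        # Return e.g. 'qemu-system-arm', or None for unknown machines
        qbsys = QEMU_SYSTEMS.get(mach)
        return 'qemu-system-%s' % qbsys if qbsys else None
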
+ def setup_final(self):
+ qemu_system = self.get('QB_SYSTEM_NAME')
+ if not qemu_system:
+ qemu_system = self.guess_qb_system()
+ if not qemu_system:
+ raise Exception("Failed to boot, QB_SYSTEM_NAME is NULL!")
+
+ qemu_bin = '%s/%s' % (self.get('STAGING_BINDIR_NATIVE'), qemu_system)
+ if not os.access(qemu_bin, os.X_OK):
+ raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
+
+ check_libgl(qemu_bin)
+
+ self.qemu_opt = "%s %s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.qemu_opt_script, self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+
+ if self.snapshot:
+ self.qemu_opt += " -snapshot"
+
+ if self.serialstdio:
+ logger.info("Interrupt character is '^]'")
+ cmd = "stty intr ^]"
+ subprocess.call(cmd, shell=True)
+
+ first_serial = ""
+ if not re.search("-nographic", self.qemu_opt):
+ first_serial = "-serial mon:vc"
+ # We always want a ttyS1. Since qemu by default adds a serial
+ # port when nodefaults is not specified, it seems that all that
+ # would be needed is to make sure a "-serial" is there. However,
+ # it appears that when "-serial" is specified, it ignores the
+ # default serial port that is normally added. So here we make
+ # sure to add two -serial if there are none. And only one if
+ # there is one -serial already.
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+ if serial_num == 0:
+ self.qemu_opt += " %s %s" % (first_serial, self.get("QB_SERIAL_OPT"))
+ elif serial_num == 1:
+ self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
+
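
The serial logic above guarantees the guest ends up with two serial ports whether or not earlier options already added one. Counting uses the same re.findall() call; traced with assumed option strings:

    import re

    for opts in ('-nographic', '-nographic -serial tcp:127.0.0.1:4444'):
        n = len(re.findall('-serial', opts))
        print(n)    # 0 -> append two -serial options, 1 -> append one
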
+ def start_qemu(self):
+ if self.kernel:
+ kernel_opts = "-kernel %s -append '%s %s %s'" % (self.kernel, self.kernel_cmdline, self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'))
+ if self.dtb:
+ kernel_opts += " -dtb %s" % self.dtb
+ else:
+ kernel_opts = ""
+ cmd = "%s %s" % (self.qemu_opt, kernel_opts)
+ logger.info('Running %s' % cmd)
+ if subprocess.call(cmd, shell=True) != 0:
+ raise Exception('Failed to run %s' % cmd)
+
+ def cleanup(self):
+ if self.cleantap:
+ cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.get('STAGING_DIR_NATIVE'))
+ logger.info('Running %s' % cmd)
+ subprocess.call(cmd, shell=True)
+ if self.lock_descriptor:
+ logger.info("Releasing lockfile for tap device '%s'" % self.tap)
+ self.release_lock()
+
+ if self.nfs_running:
+ logger.info("Shutting down the userspace NFS server...")
+ cmd = "runqemu-export-rootfs stop %s" % self.nfs_dir
+ logger.info('Running %s' % cmd)
+ subprocess.call(cmd, shell=True)
+
+ if self.saved_stty:
+ cmd = "stty %s" % self.saved_stty
+ subprocess.call(cmd, shell=True)
+
+ if self.clean_nfs_dir:
+ logger.info('Removing %s' % self.nfs_dir)
+ shutil.rmtree(self.nfs_dir)
+ shutil.rmtree('%s.pseudo_state' % self.nfs_dir)
+
+ def load_bitbake_env(self, mach=None):
+ if self.bitbake_e:
+ return
+
+ bitbake = shutil.which('bitbake')
+ if not bitbake:
+ return
+
+ if not mach:
+ mach = self.get('MACHINE')
+
+ if mach:
+ cmd = 'MACHINE=%s bitbake -e' % mach
+ else:
+ cmd = 'bitbake -e'
+
+ logger.info('Running %s...' % cmd)
+ try:
+ self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ self.bitbake_e = ''
+ logger.warn("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
+
+def main():
+ if len(sys.argv) == 1 or "help" in sys.argv:
+ print_usage()
+ return 0
+ config = BaseConfig()
+ try:
+ config.check_args()
+ except Exception as esc:
+ logger.error(esc)
+ logger.error("Try 'runqemu help' on how to use it")
+ return 1
+ config.read_qemuboot()
+ config.check_and_set()
+ config.print_config()
+ try:
+ config.setup_network()
+ config.setup_final()
+ config.start_qemu()
+ finally:
+ config.cleanup()
+ return 0
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except OEPathError as err:
+ ret = 1
+ logger.error(err.message)
+ except Exception as esc:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs b/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs
index 624deacb7..bfb60f44a 100755
--- a/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs
+++ b/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs
@@ -69,32 +69,38 @@ if [ ! -x "$IFCONFIG" ]; then
exit 1
fi
-# Ensure we start with a clean slate
-for tap in `$IFCONFIG link | grep tap | awk '{ print \$2 }' | sed s/://`; do
- echo "Note: Destroying pre-existing tap interface $tap..."
- $TUNCTL -d $tap
-done
-
-echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
-for ((index=0; index < $COUNT; index++)); do
- echo "Creating tap$index"
- ifup=`$RUNQEMU_IFUP $TUID $GID $SYSROOT 2>&1`
- if [ $? -ne 0 ]; then
- echo "Error running tunctl: $ifup"
- exit 1
- fi
-done
+if [ $COUNT -ge 0 ]; then
+ # Ensure we start with a clean slate
+ for tap in `$IFCONFIG link | grep tap | awk '{ print \$2 }' | sed s/://`; do
+ echo "Note: Destroying pre-existing tap interface $tap..."
+ $TUNCTL -d $tap
+ done
+ rm -f /etc/runqemu-nosudo
+else
+ echo "Error: Incorrect count: $COUNT"
+ exit 1
+fi
if [ $COUNT -gt 0 ]; then
+ echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
+ for ((index=0; index < $COUNT; index++)); do
+ echo "Creating tap$index"
+ ifup=`$RUNQEMU_IFUP $TUID $GID $SYSROOT 2>&1`
+ if [ $? -ne 0 ]; then
+ echo "Error running tunctl: $ifup"
+ exit 1
+ fi
+ done
+
echo "Note: For systems running NetworkManager, it's recommended"
echo "Note: that the tap devices be set as unmanaged in the"
echo "Note: NetworkManager.conf file. Add the following lines to"
echo "Note: /etc/NetworkManager/NetworkManager.conf"
echo "[keyfile]"
echo "unmanaged-devices=interface-name:tap*"
-fi
-# The runqemu script will check for this file, and if it exists,
-# will use the existing bank of tap devices without creating
-# additional ones via sudo.
-touch /etc/runqemu-nosudo
+ # The runqemu script will check for this file, and if it exists,
+ # will use the existing bank of tap devices without creating
+ # additional ones via sudo.
+ touch /etc/runqemu-nosudo
+fi
diff --git a/import-layers/yocto-poky/scripts/runqemu-internal b/import-layers/yocto-poky/scripts/runqemu-internal
deleted file mode 100755
index ac1c703c5..000000000
--- a/import-layers/yocto-poky/scripts/runqemu-internal
+++ /dev/null
@@ -1,717 +0,0 @@
-#!/bin/bash -x
-
-# Handle running OE images under qemu
-#
-# Copyright (C) 2006-2011 Linux Foundation
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-# Call setting:
-# QEMU_MEMORY (optional) - set the amount of memory in the emulated system.
-# SERIAL_LOGFILE (optional) - log the serial port output to a file
-#
-# Image options:
-# MACHINE - the machine to run
-# FSTYPE - the image type to run
-# KERNEL - the kernel image file to use
-# ROOTFS - the disk image file to use
-#
-
-mem_size=-1
-
-#Get rid of <> and get the contents of extra qemu running params
-SCRIPT_QEMU_EXTRA_OPT=`echo $SCRIPT_QEMU_EXTRA_OPT | sed -e 's/<//' -e 's/>//'`
-# If the user set qemu memory (e.g. -m 256) in the extra qemu params, we need
-# to validate it
-mem_set=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-m[[:space:]] *[0-9]*\)'`
-if [ ! -z "$mem_set" ] ; then
-#Get memory setting size from user input
- mem_size=`echo $mem_set | sed 's/-m[[:space:]] *//'`
-fi
-
-# This file is created when runqemu-gen-tapdevs creates a bank of tap
-# devices, indicating that the user should not bring up new ones using
-# sudo.
-NOSUDO_FLAG="/etc/runqemu-nosudo"
-
-QEMUIFUP=`which runqemu-ifup 2> /dev/null`
-QEMUIFDOWN=`which runqemu-ifdown 2> /dev/null`
-if [ -z "$QEMUIFUP" -o ! -x "$QEMUIFUP" ]; then
- echo "runqemu-ifup cannot be found or executed"
- exit 1
-fi
-if [ -z "$QEMUIFDOWN" -o ! -x "$QEMUIFDOWN" ]; then
- echo "runqemu-ifdown cannot be found or executed"
- exit 1
-fi
-
-NFSRUNNING="false"
-
-#capture original stty values
-ORIG_STTY=$(stty -g)
-
-if [ "$SLIRP_ENABLED" = "yes" ]; then
- KERNEL_NETWORK_CMD="ip=dhcp"
- QEMU_TAP_CMD=""
- QEMU_UI_OPTIONS="-show-cursor -usb -usbdevice tablet"
- QEMU_NETWORK_CMD=""
- DROOT="/dev/vda"
- ROOTFS_OPTIONS="-drive file=$ROOTFS,if=virtio,format=raw"
-else
- acquire_lock() {
- lockfile=$1
- if [ -z "$lockfile" ]; then
- echo "Error: missing lockfile arg passed to acquire_lock()"
- return 1
- fi
-
- touch $lockfile.lock 2>/dev/null
- if [ $? -ne 0 ]; then
- echo "Acquiring lockfile for $lockfile.lock failed"
- return 1
- fi
- exec 8>$lockfile.lock
- flock -n -x 8
- if [ $? -ne 0 ]; then
- exec 8>&-
- return 1
- fi
-
- return 0
- }
-
- release_lock() {
- lockfile=$1
- if [ -z "$lockfile" ]; then
- echo "Error: missing lockfile arg passed to release_lock()"
- return 1
- fi
-
- rm -f $lockfile.lock
- exec 8>&-
- }
-
- LOCKDIR="/tmp/qemu-tap-locks"
- if [ ! -d "$LOCKDIR" ]; then
- mkdir $LOCKDIR
- chmod 777 $LOCKDIR
- fi
-
- IFCONFIG=`which ip 2> /dev/null`
- if [ -z "$IFCONFIG" ]; then
- IFCONFIG=/sbin/ip
- fi
- if [ ! -x "$IFCONFIG" ]; then
- echo "$IFCONFIG cannot be executed"
- exit 1
- fi
-
- POSSIBLE=`$IFCONFIG link | grep 'tap' | awk '{print $2}' | sed -e 's/://' -e 's/@.*//'`
- TAP=""
- LOCKFILE=""
- USE_PRECONF_TAP="no"
- for tap in $POSSIBLE; do
- LOCKFILE="$LOCKDIR/$tap"
- if [ -e "$LOCKFILE.skip" ]; then
- echo "Found $LOCKFILE.skip, skipping $tap"
- continue
- fi
- echo "Acquiring lockfile for $tap..."
- acquire_lock $LOCKFILE
- if [ $? -eq 0 ]; then
- TAP=$tap
- USE_PRECONF_TAP="yes"
- break
- fi
- done
-
- if [ "$TAP" = "" ]; then
- if [ -e "$NOSUDO_FLAG" ]; then
- echo "Error: There are no available tap devices to use for networking,"
- echo "and I see $NOSUDO_FLAG exists, so I am not going to try creating"
- echo "a new one with sudo."
- exit 1
- fi
-
- GROUPID=`id -g`
- USERID=`id -u`
- echo "Setting up tap interface under sudo"
- # Redirect stderr since we could see a LD_PRELOAD warning here if pseudo is loaded
- # but inactive. This looks scary but is harmless
- tap=`sudo $QEMUIFUP $USERID $GROUPID $OECORE_NATIVE_SYSROOT 2> /dev/null`
- if [ $? -ne 0 ]; then
- # Re-run standalone to see verbose errors
- sudo $QEMUIFUP $USERID $GROUPID $OECORE_NATIVE_SYSROOT
- return 1
- fi
- LOCKFILE="$LOCKDIR/$tap"
- echo "Acquiring lockfile for $tap..."
- acquire_lock $LOCKFILE
- if [ $? -eq 0 ]; then
- TAP=$tap
- fi
- else
- echo "Using preconfigured tap device '$TAP'"
- echo "If this is not intended, touch $LOCKFILE.skip to make runqemu skip $TAP."
- fi
-
- cleanup() {
- if [ ! -e "$NOSUDO_FLAG" -a "$USE_PRECONF_TAP" = "no" ]; then
- # Redirect stderr since we could see a LD_PRELOAD warning here if pseudo is loaded
- # but inactive. This looks scary but is harmless
- sudo $QEMUIFDOWN $TAP $OECORE_NATIVE_SYSROOT 2> /dev/null
- fi
- echo "Releasing lockfile of preconfigured tap device '$TAP'"
- release_lock $LOCKFILE
-
- if [ "$NFSRUNNING" = "true" ]; then
- echo "Shutting down the userspace NFS server..."
- echo "runqemu-export-rootfs stop $ROOTFS"
- runqemu-export-rootfs stop $ROOTFS
- fi
- # If QEMU crashes or somehow tty properties are not restored
- # after qemu exits, we need to run stty sane
- #stty sane
-
- #instead of using stty sane we set the original stty values
- stty ${ORIG_STTY}
-
- }
-
-
- n0=$(echo $TAP | sed 's/tap//')
-
- case $n0 in
- ''|*[!0-9]*)
- echo "Error Couldn't turn $TAP into an interface number?"
- exit 1
- ;;
- esac
-
- n1=$(($n0 * 2 + 1))
- n2=$(($n1 + 1))
-
- KERNEL_NETWORK_CMD="ip=192.168.7.$n2::192.168.7.$n1:255.255.255.0"
- QEMU_TAP_CMD="-net tap,vlan=0,ifname=$TAP,script=no,downscript=no"
- if [ "$VHOST_ACTIVE" = "yes" ]; then
- QEMU_NETWORK_CMD="-net nic,model=virtio $QEMU_TAP_CMD,vhost=on"
- else
- QEMU_NETWORK_CMD="-net nic,model=virtio $QEMU_TAP_CMD"
- fi
- DROOT="/dev/vda"
- ROOTFS_OPTIONS="-drive file=$ROOTFS,if=virtio,format=raw"
-
- KERNCMDLINE="mem=$QEMU_MEMORY"
- QEMU_UI_OPTIONS="-show-cursor -usb -usbdevice tablet"
-
- NFS_INSTANCE=`echo $TAP | sed 's/tap//'`
- export NFS_INSTANCE
-
- SERIALOPTS=""
- if [ "x$SERIAL_LOGFILE" != "x" ]; then
- SERIALOPTS="-serial file:$SERIAL_LOGFILE"
- fi
-fi
-
-if [ ! -f "$KERNEL" -a "$IS_VM" = "false" ]; then
- echo "Error: Kernel image file $KERNEL doesn't exist"
- cleanup
- return 1
-fi
-
-if [ "$FSTYPE" != "nfs" -a "$IS_VM" = "false" -a ! -f "$ROOTFS" ]; then
- echo "Error: Image file $ROOTFS doesn't exist"
- cleanup
- return 1
-fi
-
-if [ "$NFS_SERVER" = "" ]; then
- NFS_SERVER="192.168.7.1"
- if [ "$SLIRP_ENABLED" = "yes" ]; then
- NFS_SERVER="10.0.2.2"
- fi
-fi
-
-if [ "$FSTYPE" = "nfs" ]; then
- NFS_DIR=`echo $ROOTFS | sed 's/^[^:]*:\(.*\)/\1/'`
- if [ "$NFS_INSTANCE" = "" ] ; then
- NFS_INSTANCE=0
- fi
- MOUNTD_RPCPORT=$[ 21111 + $NFS_INSTANCE ]
- NFSD_RPCPORT=$[ 11111 + $NFS_INSTANCE ]
- NFSD_PORT=$[ 3049 + 2 * $NFS_INSTANCE ]
- MOUNTD_PORT=$[ 3048 + 2 * $NFS_INSTANCE ]
- UNFS_OPTS="nfsvers=3,port=$NFSD_PORT,mountprog=$MOUNTD_RPCPORT,nfsprog=$NFSD_RPCPORT,udp,mountport=$MOUNTD_PORT"
-
- PSEUDO_LOCALSTATEDIR=~/.runqemu-sdk/pseudo
- export PSEUDO_LOCALSTATEDIR
-
- # Start the userspace NFS server
- echo "runqemu-export-rootfs restart $ROOTFS"
- runqemu-export-rootfs restart $ROOTFS
- if [ $? != 0 ]; then
- return 1
- fi
- NFSRUNNING="true"
-fi
-
-
-set_mem_size() {
- if [ ! -z "$mem_set" ] ; then
- #Get memory setting size from user input
- mem_size=`echo $mem_set | sed 's/-m[[:space:]] *//'`
- else
- mem_size=$1
- fi
- # QEMU_MEMORY has 'M' appended to mem_size
- QEMU_MEMORY="$mem_size"M
-
-}
-
-config_qemuarm() {
- set_mem_size 128
- QEMU=qemu-system-arm
- MACHINE_SUBTYPE=versatilepb
- export QEMU_AUDIO_DRV="none"
- QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS"
- if [ "${FSTYPE:0:3}" = "ext" -o "$FSTYPE" = "btrfs" -o "$FSTYPE" = "wic" ]; then
- KERNCMDLINE="root=$DROOT rw console=ttyAMA0,115200 console=tty $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY highres=off"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M ${MACHINE_SUBTYPE} $ROOTFS_OPTIONS -no-reboot $QEMU_UI_OPTIONS"
- fi
- if [ "$FSTYPE" = "nfs" ]; then
- if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
- echo "Error: NFS mount point $ROOTFS doesn't exist"
- cleanup
- return 1
- fi
- KERNCMDLINE="root=/dev/nfs nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw console=ttyAMA0,115200 $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M ${MACHINE_SUBTYPE} --no-reboot $QEMU_UI_OPTIONS"
- fi
- if [ "$MACHINE" = "qemuarmv6" ]; then
- QEMUOPTIONS="$QEMUOPTIONS -cpu arm1136"
- fi
- if [ "$MACHINE" = "qemuarmv7" ]; then
- QEMUOPTIONS="$QEMUOPTIONS -cpu cortex-a8"
- fi
-}
-
-config_qemuarm64() {
- set_mem_size 512
- QEMU=qemu-system-aarch64
-
- QEMU_NETWORK_CMD="-netdev tap,id=net0,ifname=$TAP,script=no,downscript=no -device virtio-net-device,netdev=net0 "
- DROOT="/dev/vda"
- ROOTFS_OPTIONS="-drive id=disk0,file=$ROOTFS,if=none,format=raw -device virtio-blk-device,drive=disk0"
-
- export QEMU_AUDIO_DRV="none"
- if [ "x$SERIALSTDIO" = "x" ] ; then
- QEMU_UI_OPTIONS="-nographic"
- else
- QEMU_UI_OPTIONS=""
- fi
- if [ "${FSTYPE:0:3}" = "ext" -o "$FSTYPE" = "btrfs" -o "$FSTYPE" = "wic" ]; then
- KERNCMDLINE="root=$DROOT rw console=ttyAMA0,38400 mem=$QEMU_MEMORY highres=off $KERNEL_NETWORK_CMD"
- # qemu-system-aarch64 only supports '-machine virt -cpu cortex-a57' for now
- QEMUOPTIONS="$QEMU_NETWORK_CMD -machine virt -cpu cortex-a57 $ROOTFS_OPTIONS $QEMU_UI_OPTIONS"
- fi
- if [ "$FSTYPE" = "nfs" ]; then
- if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
- echo "Error: NFS mount point $ROOTFS doesn't exist"
- cleanup
- return 1
- fi
- KERNCMDLINE="root=/dev/nfs nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw console=ttyAMA0,38400 mem=$QEMU_MEMORY highres=off $KERNEL_NETWORK_CMD"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -machine virt -cpu cortex-a57 $QEMU_UI_OPTIONS"
- fi
-}
-
-config_qemux86() {
- set_mem_size 256
- QEMU=qemu-system-i386
- if [ "$KVM_ACTIVE" = "yes" ]; then
- CPU_SUBTYPE=kvm32
- else
- CPU_SUBTYPE=qemu32
- fi
- if [ ! -z "$vga_option" ]; then
- QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS"
- else
- QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS -vga vmware"
- fi
- if [ "${FSTYPE:0:3}" = "ext" -o "$FSTYPE" = "btrfs" -o "$FSTYPE" = "wic" ]; then
- KERNCMDLINE="vga=0 uvesafb.mode_option=640x480-32 root=$DROOT rw mem=$QEMU_MEMORY $KERNEL_NETWORK_CMD"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE $ROOTFS_OPTIONS $QEMU_UI_OPTIONS"
- fi
- if [ "${FSTYPE:0:4}" = "cpio" ]; then
- KERNCMDLINE="vga=0 uvesafb.mode_option=640x480-32 root=/dev/ram0 rw mem=$QEMU_MEMORY $KERNEL_NETWORK_CMD"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE -initrd $ROOTFS $QEMU_UI_OPTIONS"
- fi
-
- if [ "$FSTYPE" = "nfs" ]; then
- if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
- echo "Error: NFS mount point $ROOTFS doesn't exist."
- cleanup
- return 1
- fi
- KERNCMDLINE="root=/dev/nfs nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD $QEMU_UI_OPTIONS"
- fi
- if [ "$IS_VM" = "true" ]; then
- QEMUOPTIONS="$QEMU_NETWORK_CMD $QEMU_UI_OPTIONS"
- fi
- # Currently oprofile's event based interrupt mode doesn't work (Bug #828) in
- # qemux86 and qemux86-64. We can use timer interrupt mode for now.
- KERNCMDLINE="$KERNCMDLINE oprofile.timer=1"
-}
-
-config_qemux86_64() {
- set_mem_size 256
- QEMU=qemu-system-x86_64
- if [ "$KVM_ACTIVE" = "yes" ]; then
- CPU_SUBTYPE=kvm64
- else
- CPU_SUBTYPE=core2duo
- fi
- if [ ! -z "$vga_option" ]; then
- QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS"
- else
- QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS -vga vmware"
- fi
- if [ "${FSTYPE:0:3}" = "ext" -o "$FSTYPE" = "btrfs" -o "$FSTYPE" = "wic" ]; then
- KERNCMDLINE="vga=0 uvesafb.mode_option=640x480-32 root=$DROOT rw mem=$QEMU_MEMORY $KERNEL_NETWORK_CMD"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE $ROOTFS_OPTIONS $QEMU_UI_OPTIONS"
- fi
- if [ "$FSTYPE" = "nfs" ]; then
- if [ "x$ROOTFS" = "x" ]; then
- ROOTFS=/srv/nfs/qemux86-64
- fi
- if [ ! -d "$ROOTFS" ]; then
- echo "Error: NFS mount point $ROOTFS doesn't exist."
- cleanup
- return 1
- fi
- KERNCMDLINE="root=/dev/nfs nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE $QEMU_UI_OPTIONS"
- fi
- if [ "$IS_VM" = "true" ]; then
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE $QEMU_UI_OPTIONS"
- fi
- # Currently oprofile's event based interrupt mode doesn't work (Bug #828) in
- # qemux86 and qemux86-64. We can use timer interrupt mode for now.
- KERNCMDLINE="$KERNCMDLINE oprofile.timer=1"
-}
-
-config_qemumips() {
- set_mem_size 256
- case "$MACHINE" in
- qemumips) QEMU=qemu-system-mips ;;
- qemumipsel) QEMU=qemu-system-mipsel ;;
- qemumips64) QEMU=qemu-system-mips64 ;;
- esac
- MACHINE_SUBTYPE=malta
- QEMU_UI_OPTIONS="-vga cirrus $QEMU_UI_OPTIONS"
- if [ "${FSTYPE:0:3}" = "ext" -o "$FSTYPE" = "btrfs" -o "$FSTYPE" = "wic" ]; then
- #KERNCMDLINE="root=/dev/hda console=ttyS0 console=tty0 $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- KERNCMDLINE="root=$DROOT rw console=ttyS0 console=tty $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M $MACHINE_SUBTYPE $ROOTFS_OPTIONS -no-reboot $QEMU_UI_OPTIONS"
- fi
- if [ "$FSTYPE" = "nfs" ]; then
- if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
- echo "Error: NFS mount point $ROOTFS doesn't exist"
- cleanup
- return 1
- fi
- KERNCMDLINE="root=/dev/nfs console=ttyS0 console=tty nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M $MACHINE_SUBTYPE -no-reboot $QEMU_UI_OPTIONS"
- fi
-}
-
-config_qemuppc() {
- set_mem_size 256
- QEMU=qemu-system-ppc
- MACHINE_SUBTYPE=mac99
- CPU_SUBTYPE=G4
- QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS"
- if [ "$SLIRP_ENABLED" = "yes" ]; then
- QEMU_NETWORK_CMD=""
- else
- QEMU_NETWORK_CMD="-net nic,model=pcnet $QEMU_TAP_CMD"
- fi
- if [ "${FSTYPE:0:3}" = "ext" -o "$FSTYPE" = "btrfs" -o "$FSTYPE" = "wic" ]; then
- KERNCMDLINE="root=$DROOT rw console=ttyS0 console=tty $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE -M $MACHINE_SUBTYPE $ROOTFS_OPTIONS -no-reboot $QEMU_UI_OPTIONS"
- fi
- if [ "$FSTYPE" = "nfs" ]; then
- if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
- echo "Error: NFS mount point $ROOTFS doesn't exist"
- cleanup
- return 1
- fi
- KERNCMDLINE="root=/dev/nfs console=ttyS0 console=tty nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE -M $MACHINE_SUBTYPE -no-reboot $QEMU_UI_OPTIONS"
- fi
-}
-
-config_qemush4() {
- set_mem_size 1024
- QEMU=qemu-system-sh4
- MACHINE_SUBTYPE=r2d
- QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS"
- if [ "${FSTYPE:0:3}" = "ext" -o "$FSTYPE" = "btrfs" -o "$FSTYPE" = "wic" ]; then
- #KERNCMDLINE="root=/dev/hda console=ttyS0 console=tty0 $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- KERNCMDLINE="root=/dev/hda rw console=ttySC1 noiotrap earlyprintk=sh-sci.1 console=tty $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M $MACHINE_SUBTYPE -hda $ROOTFS -no-reboot $QEMU_UI_OPTIONS -monitor null -serial vc -serial stdio"
- SERIALSTDIO="1"
- fi
- if [ "$FSTYPE" = "nfs" ]; then
- if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
- echo "Error: NFS mount point $ROOTFS doesn't exist"
- cleanup
- return 1
- fi
- KERNCMDLINE="root=/dev/nfs console=ttySC1 noiotrap earlyprintk=sh-sci.1 console=tty nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M $MACHINE_SUBTYPE -no-reboot $QEMU_UI_OPTIONS -monitor null -serial vc -serial stdio"
- SERIALSTDIO="1"
- fi
-}
-
-config_qemuzynq() {
- set_mem_size 1024
- QEMU=qemu-system-arm
- QEMU_SYSTEM_OPTIONS="$QEMU_NETWORK_CMD -M xilinx-zynq-a9 -serial null -serial mon:stdio -dtb $KERNEL-$MACHINE.dtb"
- # zynq serial ports are named 'ttyPS0' and 'ttyPS1', fixup the default values
- SCRIPT_KERNEL_OPT=$(echo "$SCRIPT_KERNEL_OPT" | sed 's/console=ttyS/console=ttyPS/g')
- if [ "${FSTYPE:0:3}" = "ext" -o "${FSTYPE:0:4}" = "cpio" ]; then
- KERNCMDLINE="earlyprintk root=/dev/ram rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_SYSTEM_OPTIONS -initrd $ROOTFS"
- fi
-}
-
-config_qemumicroblaze() {
- set_mem_size 256
- QEMU=qemu-system-microblazeel
- QEMU_SYSTEM_OPTIONS="$QEMU_NETWORK_CMD -M petalogix-ml605 -serial mon:stdio"
- if [ "${FSTYPE:0:3}" = "ext" -o "${FSTYPE:0:4}" = "cpio" ]; then
- KERNCMDLINE="earlyprintk root=/dev/ram rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_SYSTEM_OPTIONS -initrd $ROOTFS"
- fi
-}
-
-case "$MACHINE" in
- "qemuarm" | "qemuarmv6" | "qemuarmv7")
- config_qemuarm
- ;;
- "qemuarm64")
- config_qemuarm64
- ;;
- "qemux86")
- config_qemux86
- ;;
- "qemux86-64")
- config_qemux86_64
- ;;
- "qemumips" | "qemumipsel" | "qemumips64")
- config_qemumips
- ;;
- "qemuppc")
- config_qemuppc
- ;;
- "qemush4")
- config_qemush4
- ;;
- "qemuzynq")
- config_qemuzynq
- ;;
- "qemumicroblaze")
- config_qemumicroblaze
- ;;
- *)
- echo "Error: Unsupported machine type $MACHINE"
- return 1
- ;;
-esac
-
-# We need to specify -m <mem_size> to overcome a bug in qemu 0.14.0
-# https://bugs.launchpad.net/ubuntu/+source/qemu-kvm/+bug/584480
-if [ -z "$mem_set" ] ; then
- SCRIPT_QEMU_EXTRA_OPT="$SCRIPT_QEMU_EXTRA_OPT -m $mem_size"
-fi
-
-if [ "${FSTYPE:0:3}" = "ext" ]; then
- KERNCMDLINE="$KERNCMDLINE rootfstype=$FSTYPE"
-fi
-
-if [ "$FSTYPE" = "cpio.gz" ]; then
- QEMUOPTIONS="-initrd $ROOTFS -nographic"
- KERNCMDLINE="root=/dev/ram0 console=ttyS0 debugshell"
-fi
-
-if [ "$FSTYPE" = "iso" ]; then
- QEMUOPTIONS="$QEMU_NETWORK_CMD -cdrom $ROOTFS $QEMU_UI_OPTIONS"
-fi
-
-if [ "x$QEMUOPTIONS" = "x" ]; then
- echo "Error: Unable to support this combination of options"
- cleanup
- return 1
-fi
-
-if [ "$TCPSERIAL_PORTNUM" != "" ]; then
- if [ "$MACHINE" = "qemuarm64" ]; then
- SCRIPT_QEMU_EXTRA_OPT="$SCRIPT_QEMU_EXTRA_OPT -device virtio-serial-device -chardev socket,id=virtcon,port=$TCPSERIAL_PORTNUM,host=127.0.0.1 -device virtconsole,chardev=virtcon"
- else
- SCRIPT_QEMU_EXTRA_OPT="$SCRIPT_QEMU_EXTRA_OPT -serial tcp:127.0.0.1:$TCPSERIAL_PORTNUM"
- fi
-fi
-
-PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$PATH
-
-QEMUBIN=`which $QEMU 2> /dev/null`
-if [ ! -x "$QEMUBIN" ]; then
- echo "Error: No QEMU binary '$QEMU' could be found."
- cleanup
- return 1
-fi
-
-NEED_GL=`ldd $QEMUBIN 2>&1 | grep libGLU`
-# We can't run without a libGL.so
-if [ "$NEED_GL" != "" ]; then
- libgl='no'
-
- [ -e /usr/lib/libGL.so -a -e /usr/lib/libGLU.so ] && libgl='yes'
- [ -e /usr/lib64/libGL.so -a -e /usr/lib64/libGLU.so ] && libgl='yes'
- [ -e /usr/lib/*-linux-gnu/libGL.so -a -e /usr/lib/*-linux-gnu/libGLU.so ] && libgl='yes'
-
- if [ "$libgl" != 'yes' ]; then
- echo "You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.
- Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.
- Fedora package names are: mesa-libGL-devel mesa-libGLU-devel."
- return 1;
- fi
-fi
-
-do_quit() {
- cleanup
- return 1
-}
-
-trap do_quit INT TERM QUIT
-
-# qemu got segfault if linked with nVidia's libgl
-GL_LD_PRELOAD=$LD_PRELOAD
-
-if ldd $QEMUBIN | grep -i nvidia &> /dev/null
-then
-cat << EOM
-WARNING: nVidia proprietary OpenGL libraries detected.
-nVidia's OpenGL libraries are known to have compatibility issues with qemu,
-resulting in a segfault. Please uninstall these drivers or ensure the mesa libGL
-libraries precede nVidia's via LD_PRELOAD (this is done automatically on Ubuntu 10).
-EOM
-
-# Automatically use Ubuntu system's mesa libGL, other distro can add its own path
-if grep -i ubuntu /etc/lsb-release &> /dev/null
-then
- # precede nvidia's driver on Ubuntu 10
- UBUNTU_MAIN_VERSION=`cat /etc/lsb-release |grep DISTRIB_RELEASE |cut -d= -f 2| cut -d. -f 1`
- if [ "$UBUNTU_MAIN_VERSION" = "10" ];
- then
- GL_PATH=""
- if test -e /usr/lib/libGL.so
- then
- GL_PATH="/usr/lib/libGL.so"
- elif test -e /usr/lib/x86_64-linux-gnu/libGL.so
- then
- GL_PATH="/usr/lib/x86_64-linux-gnu/libGL.so"
- fi
-
- echo "Skip nVidia's libGL on Ubuntu 10!"
- GL_LD_PRELOAD="$GL_PATH $LD_PRELOAD"
- fi
-fi
-fi
-
-if [ "x$SERIALSTDIO" = "x1" ]; then
- echo "Interrupt character is '^]'"
- stty intr ^]
-fi
-
-
-# Preserve the multiplexing behavior for the monitor that would be there based
-# on whether nographic is used.
-if echo "$QEMUOPTIONS $SERIALOPTS $SCRIPT_QEMU_OPT $SCRIPT_QEMU_EXTRA_OPT" | grep -- "-nographic"; then
- FIRST_SERIAL_OPT="-serial mon:stdio"
-else
- FIRST_SERIAL_OPT="-serial mon:vc"
-fi
-
-# qemuarm64 uses virtio for any additional serial ports so the normal mechanism
-# of using -serial will not work
-if [ "$MACHINE" = "qemuarm64" ]; then
- SECOND_SERIAL_OPT="$SCRIPT_QEMU_EXTRA_OPT -device virtio-serial-device -chardev null,id=virtcon -device virtconsole,chardev=virtcon"
-else
- SECOND_SERIAL_OPT="-serial null"
-fi
-
-# We always want a ttyS1. Since qemu by default adds a serial port when
-# nodefaults is not specified, it seems that all that would be needed is to
-# make sure a "-serial" is there. However, it appears that when "-serial" is
-# specified, it ignores the default serial port that is normally added.
-# So here we make sure to add two -serial if there are none. And only one
-# if there is one -serial already.
-NUM_SERIAL_OPTS=`echo $QEMUOPTIONS $SERIALOPTS $SCRIPT_QEMU_OPT $SCRIPT_QEMU_EXTRA_OPT | sed -e 's/ /\n/g' | grep --count -- -serial`
-
-if [ "$NUM_SERIAL_OPTS" = "0" ]; then
- SCRIPT_QEMU_EXTRA_OPT="$SCRIPT_QEMU_EXTRA_OPT $FIRST_SERIAL_OPT $SECOND_SERIAL_OPT"
-elif [ "$NUM_SERIAL_OPTS" = "1" ]; then
- SCRIPT_QEMU_EXTRA_OPT="$SCRIPT_QEMU_EXTRA_OPT $SECOND_SERIAL_OPT"
-fi
-
-echo "Running $QEMU..."
-# -no-reboot is a mandatory option - see bug #100
-if [ "$IS_VM" = "true" ]; then
- # Check whether the image uses a scsi (/dev/sdX), ide (/dev/hdX) or virtio root device
- [ ! -e "$VM" ] && error "VM image is not found!"
- if grep -q 'root=/dev/sd' $VM; then
- echo "Using scsi drive"
- VM_DRIVE="-drive if=none,id=hd,file=$VM -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd"
- elif grep -q 'root=/dev/hd' $VM; then
- echo "Using ide drive"
- VM_DRIVE="$VM"
- else
- echo "Using virtio block drive"
- VM_DRIVE="-drive if=virtio,file=$VM"
- fi
- QEMU_FIRE="$QEMUBIN $VM_DRIVE $QEMUOPTIONS $SERIALOPTS -no-reboot $SCRIPT_QEMU_OPT $SCRIPT_QEMU_EXTRA_OPT"
- echo $QEMU_FIRE
- LD_PRELOAD="$GL_LD_PRELOAD" $QEMU_FIRE
-elif [ "$FSTYPE" = "iso" -o "$FSTYPE" = "wic" ]; then
- QEMU_FIRE="$QEMUBIN $QEMUOPTIONS $SERIALOPTS -no-reboot $SCRIPT_QEMU_OPT $SCRIPT_QEMU_EXTRA_OPT"
- echo $QEMU_FIRE
- LD_PRELOAD="$GL_LD_PRELOAD" $QEMU_FIRE
-else
- QEMU_FIRE="$QEMUBIN -kernel $KERNEL $QEMUOPTIONS $SLIRP_CMD $SERIALOPTS -no-reboot $SCRIPT_QEMU_OPT $SCRIPT_QEMU_EXTRA_OPT"
- echo $QEMU_FIRE -append '"'$KERNCMDLINE $SCRIPT_KERNEL_OPT'"'
- LD_PRELOAD="$GL_LD_PRELOAD" $QEMU_FIRE -append "$KERNCMDLINE $SCRIPT_KERNEL_OPT"
-fi
-ret=$?
-if [ "$SLIRP_ENABLED" != "yes" ]; then
- cleanup
-fi
-
-# restore the original stty settings before exit
-stty ${ORIG_STTY}
-trap - INT TERM QUIT
-
-return $ret
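Worth calling out from the script removed above: runqemu-internal picks the qemu drive interface by grepping the VM image for the root= device name baked into its kernel command line. A minimal Python sketch of that decision, assuming a hypothetical image path (the option strings are the ones from the shell above; the shell grep streams the file, whereas this sketch reads it whole for brevity):

def vm_drive_options(image_path):
    with open(image_path, 'rb') as img:
        data = img.read()
    if b'root=/dev/sd' in data:
        # scsi root device: attach via virtio-scsi
        return ("-drive if=none,id=hd,file=%s "
                "-device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd" % image_path)
    elif b'root=/dev/hd' in data:
        # ide root device: qemu accepts the bare image path
        return image_path
    # default: virtio block
    return "-drive if=virtio,file=%s" % image_path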
diff --git a/import-layers/yocto-poky/scripts/send-error-report b/import-layers/yocto-poky/scripts/send-error-report
index a29feff32..15b5e8491 100755
--- a/import-layers/yocto-poky/scripts/send-error-report
+++ b/import-layers/yocto-poky/scripts/send-error-report
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Sends an error report (if the report-error class was enabled) to a
# remote server.
@@ -7,7 +7,7 @@
# Author: Andreea Proca <andreea.b.proca@intel.com>
# Author: Michael Wood <michael.g.wood@intel.com>
-import urllib2
+import urllib.request, urllib.error
import sys
import json
import os
@@ -25,10 +25,10 @@ log = logging.getLogger("send-error-report")
logging.basicConfig(format='%(levelname)s: %(message)s')
def getPayloadLimit(url):
- req = urllib2.Request(url, None)
+ req = urllib.request.Request(url, None)
try:
- response = urllib2.urlopen(req)
- except urllib2.URLError as e:
+ response = urllib.request.urlopen(req)
+ except urllib.error.URLError as e:
# Use this opportunity to bail out if we can't even contact the server
log.error("Could not contact server: " + url)
log.error(e.reason)
@@ -44,12 +44,12 @@ def getPayloadLimit(url):
def ask_for_contactdetails():
print("Please enter your name and your email (optionally), they'll be saved in the file you send.")
- username = raw_input("Name (required): ")
- email = raw_input("E-mail (not required): ")
+ username = input("Name (required): ")
+ email = input("E-mail (not required): ")
return username, email
def edit_content(json_file_path):
- edit = raw_input("Review information before sending? (y/n): ")
+ edit = input("Review information before sending? (y/n): ")
if 'y' in edit or 'Y' in edit:
editor = os.environ.get('EDITOR', None)
if editor:
@@ -108,7 +108,7 @@ def prepare_data(args):
if max_log_size != 0:
for fail in jsondata['failures']:
if len(fail['log']) > max_log_size:
- print "Truncating log to allow for upload"
+ print("Truncating log to allow for upload")
fail['log'] = fail['log'][-max_log_size:]
data = json.dumps(jsondata, indent=4, sort_keys=True)
@@ -125,7 +125,7 @@ def prepare_data(args):
with open(args.error_file, 'r') as json_fp:
data = json_fp.read()
- return data
+ return data.encode('utf-8')
def send_data(data, args):
@@ -136,14 +136,14 @@ def send_data(data, args):
else:
url = "http://"+args.server+"/ClientPost/"
- req = urllib2.Request(url, data=data, headers=headers)
+ req = urllib.request.Request(url, data=data, headers=headers)
try:
- response = urllib2.urlopen(req)
- except urllib2.HTTPError, e:
+ response = urllib.request.urlopen(req)
+ except urllib.error.HTTPError as e:
logging.error(e.reason)
sys.exit(1)
- print response.read()
+ print(response.read())
if __name__ == '__main__':
@@ -192,7 +192,7 @@ if __name__ == '__main__':
args = arg_parse.parse_args()
if (args.json == False):
- print "Preparing to send errors to: "+args.server
+ print("Preparing to send errors to: "+args.server)
data = prepare_data(args)
send_data(data, args)
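The port above is mechanical once the mapping is known: urllib2.Request/urlopen become urllib.request.Request/urlopen, the exception classes move to urllib.error, the Python 2 "except X, e" syntax becomes "except X as e", and POST bodies must be bytes (hence the encode('utf-8') added to prepare_data). A minimal sketch of the resulting pattern, with a placeholder URL and helper name:

import urllib.request, urllib.error

def post_report(url, payload):
    # Python 3 refuses str bodies; encode to bytes first.
    req = urllib.request.Request(url, data=payload.encode('utf-8'),
                                 headers={'Content-type': 'application/json'})
    try:
        return urllib.request.urlopen(req).read()
    except urllib.error.HTTPError as e:
        print("server rejected the report: %s" % e.reason)
        raise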
diff --git a/import-layers/yocto-poky/scripts/swabber-strace-attach b/import-layers/yocto-poky/scripts/swabber-strace-attach
deleted file mode 100755
index bb0391a7c..000000000
--- a/import-layers/yocto-poky/scripts/swabber-strace-attach
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-import os
-import sys
-import subprocess
-
-# Detach from the controlling terminal and parent process by forking twice to daemonize ourselves,
-# then run the command passed as argv[1]. Send log data to argv[2].
-
-pid = os.fork()
-if (pid == 0):
- os.setsid()
- pid = os.fork()
- if (pid != 0):
- os._exit(0)
-else:
- sys.exit()
-
-
-si = file(os.devnull, 'r')
-so = file(sys.argv[2], 'w')
-se = so
-
-# Replace those fds with our own
-os.dup2(si.fileno(), sys.stdin.fileno())
-os.dup2(so.fileno(), sys.stdout.fileno())
-os.dup2(se.fileno(), sys.stderr.fileno())
-
-ret = subprocess.call(sys.argv[1], shell=True)
-
-os._exit(ret)
-
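The deleted helper above could not be trivially ported because it relied on the Python 2-only file() builtin to reopen stdio. For reference, the same detach-and-run pattern in Python 3 looks roughly like this (a sketch, not a drop-in replacement; cmd and logfile stand in for argv[1] and argv[2]):

import os, sys, subprocess

def detach_and_run(cmd, logfile):
    if os.fork() != 0:
        sys.exit(0)          # first parent returns to the caller's shell
    os.setsid()              # become a session leader, dropping the controlling tty
    if os.fork() != 0:
        os._exit(0)          # second parent exits; the grandchild can never regain a tty
    si = open(os.devnull, 'r')
    so = open(logfile, 'w')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(so.fileno(), sys.stderr.fileno())
    os._exit(subprocess.call(cmd, shell=True))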
diff --git a/import-layers/yocto-poky/scripts/sysroot-relativelinks.py b/import-layers/yocto-poky/scripts/sysroot-relativelinks.py
index ac26367e7..e44eba2b1 100755
--- a/import-layers/yocto-poky/scripts/sysroot-relativelinks.py
+++ b/import-layers/yocto-poky/scripts/sysroot-relativelinks.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import sys
import os
diff --git a/import-layers/yocto-poky/scripts/test-dependencies.sh b/import-layers/yocto-poky/scripts/test-dependencies.sh
index 0170947f0..00c50e0d6 100755
--- a/import-layers/yocto-poky/scripts/test-dependencies.sh
+++ b/import-layers/yocto-poky/scripts/test-dependencies.sh
@@ -141,7 +141,7 @@ build_all() {
bitbake -k $targets 2>&1 | tee -a ${OUTPUT1}/complete.log
RESULT+=${PIPESTATUS[0]}
grep "ERROR: Task.*failed" ${OUTPUT1}/complete.log > ${OUTPUT1}/failed-tasks.log
- cat ${OUTPUT1}/failed-tasks.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g' | sort -u > ${OUTPUT1}/failed-recipes.log
+ cat ${OUTPUT1}/failed-tasks.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb;.*@@g' | sort -u > ${OUTPUT1}/failed-recipes.log
}
build_every_recipe() {
@@ -178,7 +178,7 @@ build_every_recipe() {
RESULT+=${RECIPE_RESULT}
mv ${OUTPUTB}/${recipe}.log ${OUTPUTB}/failed/
grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | tee -a ${OUTPUTB}/failed-tasks.log
- grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g' >> ${OUTPUTB}/failed-recipes.log
+ grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb;.*@@g' >> ${OUTPUTB}/failed-recipes.log
# and append also ${recipe} in case the failed task was from some dependency
echo ${recipe} >> ${OUTPUTB}/failed-recipes.log
else
diff --git a/import-layers/yocto-poky/scripts/test-remote-image b/import-layers/yocto-poky/scripts/test-remote-image
index 9c5b0158d..27b1cae38 100755
--- a/import-layers/yocto-poky/scripts/test-remote-image
+++ b/import-layers/yocto-poky/scripts/test-remote-image
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# Copyright (c) 2014 Intel Corporation
#
@@ -92,13 +92,11 @@ def get_args_parser():
parser.add_argument('--skip-download', required=False, action="store_true", dest="skip_download", default=False, help='Skip downloading the images completely. This needs the correct files to be present in the directory specified by the target profile.')
return parser
-class BaseTargetProfile(object):
+class BaseTargetProfile(object, metaclass=ABCMeta):
"""
This class defines the meta profile for a specific target (MACHINE type + image type).
"""
- __metaclass__ = ABCMeta
-
def __init__(self, image_type):
self.image_type = image_type
@@ -191,13 +189,11 @@ class AutoTargetProfile(BaseTargetProfile):
return controller.get_extra_files()
-class BaseRepoProfile(object):
+class BaseRepoProfile(object, metaclass=ABCMeta):
"""
This class defines the meta profile for an images repository.
"""
- __metaclass__ = ABCMeta
-
def __init__(self, repolink, localdir):
self.localdir = localdir
self.repolink = repolink
@@ -289,7 +285,7 @@ class HwAuto():
result = bitbake("%s -c testimage" % image_type, ignore_status=True, postconfig=postconfig)
testimage_results = ftools.read_file(os.path.join(get_bb_var("T", image_type), "log.do_testimage"))
log.info('Runtime tests results for %s:' % image_type)
- print testimage_results
+ print(testimage_results)
return result
# Start the procedure!
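The metaclass change here is the one Python 3 port that fails silently if missed: a class-body __metaclass__ attribute is just an ordinary attribute under Python 3, so the ABC machinery never engages and abstract methods stop being enforced. A minimal illustration with a stand-in class:

from abc import ABCMeta, abstractmethod

# Python 2 spelling -- inert on Python 3:
#     class BaseProfile(object):
#         __metaclass__ = ABCMeta

class BaseProfile(object, metaclass=ABCMeta):  # Python 3 spelling, as in the patch
    @abstractmethod
    def kernel_file(self):
        ...

# BaseProfile() now raises TypeError until a subclass overrides kernel_file().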
diff --git a/import-layers/yocto-poky/scripts/tiny/dirsize.py b/import-layers/yocto-poky/scripts/tiny/dirsize.py
index 40ff4ab89..ddccc5a8c 100755
--- a/import-layers/yocto-poky/scripts/tiny/dirsize.py
+++ b/import-layers/yocto-poky/scripts/tiny/dirsize.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
# All rights reserved.
@@ -52,26 +52,22 @@ class Record:
self.size = 0
self.records = []
- def __cmp__(this, that):
+ def __lt__(this, that):
if that is None:
- return 1
+ return False
if not isinstance(that, Record):
raise TypeError
if len(this.records) > 0 and len(that.records) == 0:
- return -1
- if len(this.records) == 0 and len(that.records) > 0:
- return 1
- if this.size < that.size:
- return -1
+ return False
if this.size > that.size:
- return 1
- return 0
+ return False
+ return True
def show(self, minsize):
total = 0
if self.size <= minsize:
return 0
- print "%10d %s" % (self.size, self.path)
+ print("%10d %s" % (self.size, self.path))
for r in self.records:
total += r.show(minsize)
if len(self.records) == 0:
@@ -85,8 +81,8 @@ def main():
minsize = int(sys.argv[1])
rootfs = Record.create(".")
total = rootfs.show(minsize)
- print "Displayed %d/%d bytes (%.2f%%)" % \
- (total, rootfs.size, 100 * float(total) / rootfs.size)
+ print("Displayed %d/%d bytes (%.2f%%)" % \
+ (total, rootfs.size, 100 * float(total) / rootfs.size))
if __name__ == "__main__":
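Python 3 removed __cmp__ entirely, so sortable classes must provide rich comparisons; sorted() only needs __lt__, which is what the patch supplies. An arguably more robust spelling of the same ordering (entries with children first, then by size) uses functools.total_ordering over an explicit key; a hypothetical sketch:

from functools import total_ordering

@total_ordering
class Record:
    def __init__(self, path, size=0, records=()):
        self.path, self.size, self.records = path, size, list(records)

    def _key(self):
        # False sorts before True, so records with children come first.
        return (len(self.records) == 0, self.size)

    def __eq__(self, other):
        return isinstance(other, Record) and self._key() == other._key()

    def __lt__(self, other):
        if not isinstance(other, Record):
            raise TypeError
        return self._key() < other._key()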
diff --git a/import-layers/yocto-poky/scripts/tiny/ksize.py b/import-layers/yocto-poky/scripts/tiny/ksize.py
index 4006f2f6f..b9d2b192c 100755
--- a/import-layers/yocto-poky/scripts/tiny/ksize.py
+++ b/import-layers/yocto-poky/scripts/tiny/ksize.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
# All rights reserved.
@@ -28,16 +28,14 @@ import sys
import getopt
import os
from subprocess import *
-from string import join
-
def usage():
prog = os.path.basename(sys.argv[0])
- print 'Usage: %s [OPTION]...' % (prog)
- print ' -d, display an additional level of drivers detail'
- print ' -h, --help display this help and exit'
- print ''
- print 'Run %s from the top-level Linux kernel build directory.' % (prog)
+ print('Usage: %s [OPTION]...' % prog)
+ print(' -d, display an additional level of drivers detail')
+ print(' -h, --help display this help and exit')
+ print('')
+ print('Run %s from the top-level Linux kernel build directory.' % prog)
class Sizes:
@@ -55,8 +53,8 @@ class Sizes:
self.text = self.data = self.bss = self.total = 0
def show(self, indent=""):
- print "%-32s %10d | %10d %10d %10d" % \
- (indent+self.title, self.total, self.text, self.data, self.bss)
+ print("%-32s %10d | %10d %10d %10d" % \
+ (indent+self.title, self.total, self.text, self.data, self.bss))
class Report:
@@ -66,7 +64,7 @@ class Report:
p = Popen("ls " + path + "/*.o | grep -v built-in.o",
shell=True, stdout=PIPE, stderr=PIPE)
- glob = join(p.communicate()[0].splitlines())
+ glob = ' '.join(p.communicate()[0].splitlines())
oreport = Report(glob, path + "/*.o")
oreport.sizes.title = path + "/*.o"
r.parts.append(oreport)
@@ -101,22 +99,22 @@ class Report:
def show(self, indent=""):
rule = str.ljust(indent, 80, '-')
- print "%-32s %10s | %10s %10s %10s" % \
- (indent+self.title, "total", "text", "data", "bss")
- print rule
+ print("%-32s %10s | %10s %10s %10s" % \
+ (indent+self.title, "total", "text", "data", "bss"))
+ print(rule)
self.sizes.show(indent)
- print rule
+ print(rule)
for p in self.parts:
if p.sizes.total > 0:
p.sizes.show(indent)
- print rule
- print "%-32s %10d | %10d %10d %10d" % \
+ print(rule)
+ print("%-32s %10d | %10d %10d %10d" % \
(indent+"sum", self.totals["total"], self.totals["text"],
- self.totals["data"], self.totals["bss"])
- print "%-32s %10d | %10d %10d %10d" % \
+ self.totals["data"], self.totals["bss"]))
+ print("%-32s %10d | %10d %10d %10d" % \
(indent+"delta", self.deltas["total"], self.deltas["text"],
- self.deltas["data"], self.deltas["bss"])
- print "\n"
+ self.deltas["data"], self.deltas["bss"]))
+ print("\n")
def __cmp__(this, that):
if that is None:
@@ -133,8 +131,8 @@ class Report:
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "dh", ["help"])
- except getopt.GetoptError, err:
- print '%s' % str(err)
+ except getopt.GetoptError as err:
+ print('%s' % str(err))
usage()
sys.exit(2)
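One caveat with this hunk: the Popen call shown above is unchanged, so on Python 3 communicate() still returns bytes, and ' '.join() over bytes.splitlines() would raise TypeError. The usual companion change is to request text mode; a sketch of the safe idiom (assuming it runs in a directory containing *.o files):

from subprocess import Popen, PIPE

# universal_newlines=True (text=True on newer Pythons) makes communicate()
# return str, so splitlines() and ' '.join() behave as they did on Python 2.
p = Popen("ls *.o 2>/dev/null", shell=True, stdout=PIPE, stderr=PIPE,
          universal_newlines=True)
out, _ = p.communicate()
glob = ' '.join(out.splitlines())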
diff --git a/import-layers/yocto-poky/scripts/verify-bashisms b/import-layers/yocto-poky/scripts/verify-bashisms
new file mode 100755
index 000000000..0741e1844
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/verify-bashisms
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+
+import sys, os, subprocess, re, shutil
+
+whitelist = (
+ # type is supported by dash
+ 'if type systemctl >/dev/null 2>/dev/null; then',
+ 'if type systemd-tmpfiles >/dev/null 2>/dev/null; then',
+ 'if type update-rc.d >/dev/null 2>/dev/null; then',
+ 'command -v',
+ # HOSTNAME is set locally
+ 'buildhistory_single_commit "$CMDLINE" "$HOSTNAME"',
+ # False-positive, match is a grep not shell expression
+ 'grep "^$groupname:[^:]*:[^:]*:\\([^,]*,\\)*$username\\(,[^,]*\\)*"',
+ # TODO verify dash's '. script args' behaviour
+ '. $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE'
+ )
+
+def is_whitelisted(s):
+ for w in whitelist:
+ if w in s:
+ return True
+ return False
+
+def process(recipe, function, script):
+ import tempfile
+
+ if not script.startswith("#!"):
+ script = "#! /bin/sh\n" + script
+
+ fn = tempfile.NamedTemporaryFile(mode="w+t")
+ fn.write(script)
+ fn.flush()
+
+ try:
+ subprocess.check_output(("checkbashisms.pl", fn.name), universal_newlines=True, stderr=subprocess.STDOUT)
+ # No bashisms, so just return
+ return
+ except subprocess.CalledProcessError as e:
+ # TODO check exit code is 1
+
+ # Replace the temporary filename with the function and split it
+ output = e.output.replace(fn.name, function).splitlines()
+ if len(output) % 2 != 0:
+ print("Unexpected output from checkbashisms: %s" % str(output))
+ return
+
+ # Turn the output into a list of (message, source) values
+ result = []
+ # Check the results against the whitelist
+ for message, source in zip(output[0::2], output[1::2]):
+ if not is_whitelisted(source):
+ result.append((message, source))
+ return result
+
+def get_tinfoil():
+ scripts_path = os.path.dirname(os.path.realpath(__file__))
+ lib_path = scripts_path + '/lib'
+ sys.path = sys.path + [lib_path]
+ import scriptpath
+ scriptpath.add_bitbake_lib_path()
+ import bb.tinfoil
+ tinfoil = bb.tinfoil.Tinfoil()
+ tinfoil.prepare()
+ # tinfoil.logger.setLevel(logging.WARNING)
+ return tinfoil
+
+if __name__=='__main__':
+ import shutil
+ if shutil.which("checkbashisms.pl") is None:
+ print("Cannot find checkbashisms.pl on $PATH")
+ sys.exit(1)
+
+ tinfoil = get_tinfoil()
+
+ # This is only the default configuration and should iterate over
+ # recipecaches to handle multiconfig environments
+ pkg_pn = tinfoil.cooker.recipecaches[""].pkg_pn
+
+ # TODO: use argparse and have --help
+ if len(sys.argv) > 1:
+ initial_pns = sys.argv[1:]
+ else:
+ initial_pns = sorted(pkg_pn)
+
+ pns = []
+ print("Generating file list...")
+ for pn in initial_pns:
+ for fn in pkg_pn[pn]:
+ # There's no point checking multiple BBCLASSEXTENDed variants of the same recipe
+ realfn, _, _ = bb.cache.virtualfn2realfn(fn)
+ if realfn not in pns:
+ pns.append(realfn)
+
+
+ def func(fn):
+ result = []
+ data = tinfoil.parse_recipe_file(fn)
+ for key in data.keys():
+ if data.getVarFlag(key, "func", True) and not data.getVarFlag(key, "python", True):
+ script = data.getVar(key, False)
+ if not script: continue
+ #print ("%s:%s" % (fn, key))
+ r = process(fn, key, script)
+ if r: result.extend(r)
+ return fn, result
+
+ print("Scanning scripts...\n")
+ import multiprocessing
+ pool = multiprocessing.Pool()
+ for pn,results in pool.imap(func, pns):
+ if results:
+ print(pn)
+ for message,source in results:
+ print(" %s\n %s" % (message, source))
+ print()
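A note on the parsing in process(): checkbashisms.pl emits each finding as a message line followed by the offending source line, which is why the script pairs even and odd lines with slicing (and why an odd line count signals unexpected output). The pairing idiom in isolation, with made-up sample output:

output = [
    "possible bashism in foo line 3 ($RANDOM):",
    "echo $RANDOM",
    "possible bashism in foo line 9 (echo -e):",
    "echo -e hi",
]
for message, source in zip(output[0::2], output[1::2]):
    print("%s\n    %s" % (message, source))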
diff --git a/import-layers/yocto-poky/scripts/wic b/import-layers/yocto-poky/scripts/wic
index 2286f20a9..fe2c33f0e 100755
--- a/import-layers/yocto-poky/scripts/wic
+++ b/import-layers/yocto-poky/scripts/wic
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
@@ -28,7 +28,6 @@
# AUTHORS
# Tom Zanussi <tom.zanussi (at] linux.intel.com>
#
-
__version__ = "0.2.0"
# Python Standard Library modules
@@ -114,6 +113,7 @@ def wic_create_subcommand(args, usage_str):
parser.add_option("-c", "--compress-with", choices=("gzip", "bzip2", "xz"),
dest='compressor',
help="compress image with specified compressor")
+ parser.add_option("-m", "--bmap", action="store_true", help="generate .bmap")
parser.add_option("-v", "--vars", dest='vars_dir',
help="directory with <image>.env files that store "
"bitbake variables")
@@ -140,8 +140,8 @@ def wic_create_subcommand(args, usage_str):
if not val:
missed.append(opt)
if missed:
- print "The following build artifacts are not specified:"
- print " " + ", ".join(missed)
+ print("The following build artifacts are not specified:")
+ print(" " + ", ".join(missed))
sys.exit(1)
if options.image_name:
@@ -153,12 +153,12 @@ def wic_create_subcommand(args, usage_str):
BB_VARS.vars_dir = options.vars_dir
if options.build_check:
- print "Checking basic build environment..."
+ print("Checking basic build environment...")
if not engine.verify_build_env():
- print "Couldn't verify build environment, exiting\n"
+ print("Couldn't verify build environment, exiting\n")
sys.exit(1)
else:
- print "Done.\n"
+ print("Done.\n")
bootimg_dir = ""
@@ -168,7 +168,7 @@ def wic_create_subcommand(args, usage_str):
if options.debug:
argv.append("--debug")
- print "Building rootfs...\n"
+ print("Building rootfs...\n")
if bitbake_main(BitBakeConfigParameters(argv),
cookerdata.CookerConfiguration()):
sys.exit(1)
@@ -179,7 +179,7 @@ def wic_create_subcommand(args, usage_str):
options.image_name)
else:
if options.build_rootfs:
- print "Image name is not specified, exiting. (Use -e/--image-name to specify it)\n"
+ print("Image name is not specified, exiting. (Use -e/--image-name to specify it)\n")
sys.exit(1)
wks_file = args[0]
@@ -187,9 +187,9 @@ def wic_create_subcommand(args, usage_str):
if not wks_file.endswith(".wks"):
wks_file = engine.find_canned_image(scripts_path, wks_file)
if not wks_file:
- print "No image named %s found, exiting. (Use 'wic list images' "\
+ print("No image named %s found, exiting. (Use 'wic list images' "\
"to list available images, or specify a fully-qualified OE "\
- "kickstart (.wks) filename)\n" % args[0]
+ "kickstart (.wks) filename)\n" % args[0])
sys.exit(1)
image_output_dir = ""
@@ -204,16 +204,16 @@ def wic_create_subcommand(args, usage_str):
kernel_dir = options.kernel_dir
native_sysroot = options.native_sysroot
if rootfs_dir and not os.path.isdir(rootfs_dir):
- print "--roofs-dir (-r) not found, exiting\n"
+ print("--roofs-dir (-r) not found, exiting\n")
sys.exit(1)
if not os.path.isdir(bootimg_dir):
- print "--bootimg-dir (-b) not found, exiting\n"
+ print("--bootimg-dir (-b) not found, exiting\n")
sys.exit(1)
if not os.path.isdir(kernel_dir):
- print "--kernel-dir (-k) not found, exiting\n"
+ print("--kernel-dir (-k) not found, exiting\n")
sys.exit(1)
if not os.path.isdir(native_sysroot):
- print "--native-sysroot (-n) not found, exiting\n"
+ print("--native-sysroot (-n) not found, exiting\n")
sys.exit(1)
else:
not_found = not_found_dir = ""
@@ -226,12 +226,12 @@ def wic_create_subcommand(args, usage_str):
if not_found:
if not not_found_dir:
not_found_dir = "Completely missing artifact - wrong image (.wks) used?"
- print "Build artifacts not found, exiting."
- print " (Please check that the build artifacts for the machine"
- print " selected in local.conf actually exist and that they"
- print " are the correct artifacts for the image (.wks file)).\n"
- print "The artifact that couldn't be found was %s:\n %s" % \
- (not_found, not_found_dir)
+ print("Build artifacts not found, exiting.")
+ print(" (Please check that the build artifacts for the machine")
+ print(" selected in local.conf actually exist and that they")
+ print(" are the correct artifacts for the image (.wks file)).\n")
+ print("The artifact that couldn't be found was %s:\n %s" % \
+ (not_found, not_found_dir))
sys.exit(1)
krootfs_dir = options.rootfs_dir
@@ -241,10 +241,10 @@ def wic_create_subcommand(args, usage_str):
rootfs_dir = rootfs_dir_to_args(krootfs_dir)
- print "Creating image(s)...\n"
+ print("Creating image(s)...\n")
engine.wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
native_sysroot, scripts_path, image_output_dir,
- options.compressor, options.debug)
+ options.compressor, options.bmap, options.debug)
def wic_list_subcommand(args, usage_str):
@@ -294,7 +294,7 @@ subcommands = {
def start_logging(loglevel):
- logging.basicConfig(filname='wic.log', filemode='w', level=loglevel)
+ logging.basicConfig(filename='wic.log', filemode='w', level=loglevel)
def main(argv):
@@ -318,6 +318,6 @@ if __name__ == "__main__":
try:
sys.exit(main(sys.argv[1:]))
except WicError as err:
- print >> sys.stderr, "ERROR:", err
+ print("ERROR:", err, file=sys.stderr)
sys.exit(1)
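The filname/filename fix near the end is subtler than it looks: older interpreters silently ignored unrecognised keyword arguments to logging.basicConfig, so the typo meant logging quietly went to stderr instead of wic.log, while current Python 3 releases reject the unknown keyword outright. With the corrected spelling the call behaves as intended:

import logging

# The corrected call actually creates and truncates wic.log.
logging.basicConfig(filename='wic.log', filemode='w', level=logging.INFO)
logging.info("this record lands in wic.log")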
diff --git a/import-layers/yocto-poky/scripts/wipe-sysroot b/import-layers/yocto-poky/scripts/wipe-sysroot
index 9e067e8df..5e6b1a4e2 100755
--- a/import-layers/yocto-poky/scripts/wipe-sysroot
+++ b/import-layers/yocto-poky/scripts/wipe-sysroot
@@ -51,4 +51,4 @@ rm -rf $STAMPS_DIR/*/*/*.do_packagedata.*
rm -rf $STAMPS_DIR/*/*/*.do_packagedata_setscene.*
# The sstate manifests
-rm -rf $SSTATE_MANIFESTS/manifest-*.populate-sysroot
+rm -rf $SSTATE_MANIFESTS/manifest-*.populate_sysroot
diff --git a/import-layers/yocto-poky/scripts/yocto-bsp b/import-layers/yocto-poky/scripts/yocto-bsp
index 82a050ebd..6fb1f419c 100755
--- a/import-layers/yocto-poky/scripts/yocto-bsp
+++ b/import-layers/yocto-poky/scripts/yocto-bsp
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
@@ -30,120 +30,128 @@
# Tom Zanussi <tom.zanussi (at] intel.com>
#
-__version__ = "0.1.0"
-
import os
import sys
-import optparse
+import argparse
import logging
-scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
-lib_path = scripts_path + '/lib'
-sys.path = sys.path + [lib_path]
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, scripts_path + '/lib')
+import argparse_oe
from bsp.help import *
from bsp.engine import *
-def yocto_bsp_create_subcommand(args, usage_str):
+def do_create_bsp(args):
"""
Command-line handling for BSP creation. The real work is done by
bsp.engine.yocto_bsp_create()
"""
- parser = optparse.OptionParser(usage = usage_str)
-
- parser.add_option("-o", "--outdir", dest = "outdir", action = "store",
- help = "name of BSP dir to create")
- parser.add_option("-i", "--infile", dest = "properties_file", action = "store",
- help = "name of file containing the values for BSP properties as a JSON file")
- parser.add_option("-c", "--codedump", dest = "codedump", action = "store_true",
- default = False, help = "dump the generated code to bspgen.out")
- parser.add_option("-s", "--skip-git-check", dest = "git_check", action = "store_false",
- default = True, help = "skip the git connectivity check")
- (options, args) = parser.parse_args(args)
-
- if len(args) != 2:
- logging.error("Wrong number of arguments, exiting\n")
- parser.print_help()
- sys.exit(1)
-
- machine = args[0]
- karch = args[1]
-
- if options.outdir:
- bsp_output_dir = options.outdir
+ if args.outdir:
+ bsp_output_dir = args.outdir
else:
- bsp_output_dir = "meta-" + machine
+ bsp_output_dir = "meta-" + args.bspname
- if options.git_check and not options.properties_file:
- print "Checking basic git connectivity..."
+ if args.git_check and not args.properties_file:
+ print("Checking basic git connectivity...")
if not verify_git_repo(GIT_CHECK_URI):
- print "Couldn't verify git connectivity, exiting\n"
- print "Details: couldn't access %s" % GIT_CHECK_URI
- print " (this most likely indicates a network connectivity problem or"
- print " a misconfigured git intallation)"
+ print("Couldn't verify git connectivity, exiting\n")
+ print("Details: couldn't access %s" % GIT_CHECK_URI)
+ print(" (this most likely indicates a network connectivity problem or")
+ print(" a misconfigured git intallation)")
sys.exit(1)
else:
- print "Done.\n"
+ print("Done.\n")
- yocto_bsp_create(machine, karch, scripts_path, bsp_output_dir, options.codedump, options.properties_file)
+ yocto_bsp_create(args.bspname, args.karch, scripts_path, bsp_output_dir, args.codedump, args.properties_file)
-def yocto_bsp_list_subcommand(args, usage_str):
+def do_list_bsp(args):
"""
Command-line handling for listing available BSP properties and
values. The real work is done by bsp.engine.yocto_bsp_list()
"""
- parser = optparse.OptionParser(usage = usage_str)
-
- parser.add_option("-o", "--outfile", action = "store", dest = "properties_file",
- help = "dump the possible values for BSP properties to a JSON file")
-
- (options, args) = parser.parse_args(args)
-
- if not yocto_bsp_list(args, scripts_path, options.properties_file):
- logging.error("Bad list arguments, exiting\n")
- parser.print_help()
- sys.exit(1)
+ yocto_bsp_list(args, scripts_path)
+def do_help_bsp(args):
+ """
+ Command-line help tool
+ """
+ help_text = command_help.get(args.subcommand)
+ pager = subprocess.Popen('less', stdin=subprocess.PIPE)
+ pager.communicate(bytes(help_text,'UTF-8'))
-subcommands = {
- "create": [yocto_bsp_create_subcommand,
- yocto_bsp_create_usage,
- yocto_bsp_create_help],
- "list": [yocto_bsp_list_subcommand,
- yocto_bsp_list_usage,
- yocto_bsp_list_help],
+command_help = {
+ "create": yocto_bsp_create_help,
+ "list": yocto_bsp_list_help
}
def start_logging(loglevel):
- logging.basicConfig(filname = 'yocto-bsp.log', filemode = 'w', level=loglevel)
+ logging.basicConfig(filename = 'yocto-bsp.log', filemode = 'w', level=loglevel)
def main():
- parser = optparse.OptionParser(version = "yocto-bsp version %s" % __version__,
- usage = yocto_bsp_usage)
+ parser = argparse_oe.ArgumentParser(description='Create a customized Yocto BSP layer.',
+ epilog="See '%(prog)s help <subcommand>' for more information on a specific command.")
- parser.disable_interspersed_args()
- parser.add_option("-D", "--debug", dest = "debug", action = "store_true",
+ parser.add_argument("-D", "--debug", action = "store_true",
default = False, help = "output debug information")
+ subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ create_parser = subparsers.add_parser('create', help='Create a new Yocto BSP',
+ description='Create a new Yocto BSP')
+ create_parser.add_argument('bspname', metavar='bsp-name', help='name for the new BSP')
+ create_parser.add_argument('karch', help='kernel architecture')
+ create_parser.add_argument("-o", "--outdir", help = "name of BSP dir to create")
+ create_parser.add_argument("-i", "--infile", dest = "properties_file",
+ help = "name of file containing the values for BSP properties as a JSON file")
+ create_parser.add_argument("-c", "--codedump", action = "store_true", default = False,
+ help = "dump the generated code to bspgen.out")
+ create_parser.add_argument("-s", "--skip-git-check", dest = "git_check", action = "store_false",
+ default = True, help = "skip the git connectivity check")
+ create_parser.set_defaults(func=do_create_bsp)
+
- (options, args) = parser.parse_args()
+ list_parser = subparsers.add_parser('list', help='List available values for options and BSP properties')
+ list_parser.add_argument('karch', help='kernel architecture')
+ prop_group = list_parser.add_mutually_exclusive_group()
+ prop_group.add_argument("--properties", action = "store_true", default = False,
+ help = "list all properties for the kernel architecture")
+ prop_group.add_argument("--property", help = "list available values for the property")
+ list_parser.add_argument("-o", "--outfile", dest = "properties_file",
+ help = "dump the possible values for BSP properties to a JSON file")
+
+ list_parser.set_defaults(func=do_list_bsp)
+
+ help_parser = subparsers.add_parser('help',
+ description='This command displays detailed help for the specified subcommand.')
+ help_parser.add_argument('subcommand', nargs='?')
+ help_parser.set_defaults(func=do_help_bsp)
+
+ args = parser.parse_args()
loglevel = logging.INFO
- if options.debug:
+ if args.debug:
loglevel = logging.DEBUG
start_logging(loglevel)
- if len(args):
- if args[0] == "help":
- if len(args) == 1:
- parser.print_help()
- sys.exit()
+ if args._subparser_name == "list":
+ if not args.karch == "karch" and not args.properties and not args.property:
+ print ("yocto-bsp list: error: one of the arguments --properties --property is required")
+ list_parser.print_help()
- invoke_subcommand(args, parser, yocto_bsp_help_usage, subcommands)
+ if args._subparser_name == "help":
+ if not args.subcommand:
+ parser.print_help()
+ return 0
+ elif not command_help.get(args.subcommand):
+ print ("yocto-bsp help: No manual entry for %s" % args.subcommand)
+ return 1
+ return args.func(args)
if __name__ == "__main__":
try:
@@ -153,4 +161,3 @@ if __name__ == "__main__":
import traceback
traceback.print_exc()
sys.exit(ret)
-
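The optparse-to-argparse rewrite above follows a standard shape: one add_parser() per subcommand, handlers attached with set_defaults(func=...), and a single dispatch through args.func(args). Stripped to its skeleton (names hypothetical):

import argparse

def do_create(args):
    print("creating", args.name)

parser = argparse.ArgumentParser(description='demo tool')
parser.add_argument("-D", "--debug", action="store_true", help="output debug information")
subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
subparsers.required = True

create_parser = subparsers.add_parser('create', help='create a thing')
create_parser.add_argument('name')
create_parser.set_defaults(func=do_create)

args = parser.parse_args()
args.func(args)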
diff --git a/import-layers/yocto-poky/scripts/yocto-kernel b/import-layers/yocto-poky/scripts/yocto-kernel
index daaad0752..5c70d0c8c 100755
--- a/import-layers/yocto-poky/scripts/yocto-kernel
+++ b/import-layers/yocto-poky/scripts/yocto-kernel
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
@@ -351,7 +351,7 @@ subcommands = {
def start_logging(loglevel):
- logging.basicConfig(filname = 'yocto-kernel.log', filemode = 'w', level=loglevel)
+ logging.basicConfig(filename = 'yocto-kernel.log', filemode = 'w', level=loglevel)
def main():
diff --git a/import-layers/yocto-poky/scripts/yocto-layer b/import-layers/yocto-poky/scripts/yocto-layer
index 356972ece..d58facaa4 100755
--- a/import-layers/yocto-poky/scripts/yocto-layer
+++ b/import-layers/yocto-poky/scripts/yocto-layer
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
@@ -113,7 +113,7 @@ subcommands = {
def start_logging(loglevel):
- logging.basicConfig(filname = 'yocto-layer.log', filemode = 'w', level=loglevel)
+ logging.basicConfig(filename = 'yocto-layer.log', filemode = 'w', level=loglevel)
def main():