author Dave Cobbley <david.j.cobbley@linux.intel.com> 2018-08-14 10:05:37 -0700
committer Brad Bishop <bradleyb@fuzziesquirrel.com> 2018-08-22 21:26:31 -0400
commit eb8dc40360f0cfef56fb6947cc817a547d6d9bc6 (patch)
tree de291a73dc37168da6370e2cf16c347d1eba9df8 /poky/scripts
parent 9c3cf826d853102535ead04cebc2d6023eff3032 (diff)
[Subtree] Removing import-layers directory
As part of the move to subtrees, we need to bring all the import-layers content to the top level.

Change-Id: I4a163d10898cbc6e11c27f776f60e1a470049d8f
Signed-off-by: Dave Cobbley <david.j.cobbley@linux.intel.com>
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Diffstat (limited to 'poky/scripts')
-rw-r--r-- poky/scripts/README | 1
-rwxr-xr-x poky/scripts/bitbake-prserv-tool | 112
-rwxr-xr-x poky/scripts/bitbake-whatchanged | 330
-rwxr-xr-x poky/scripts/buildhistory-collect-srcrevs | 117
-rwxr-xr-x poky/scripts/buildhistory-diff | 139
-rwxr-xr-x poky/scripts/buildstats-diff | 306
-rwxr-xr-x poky/scripts/combo-layer | 1376
-rwxr-xr-x poky/scripts/combo-layer-hook-default.sh | 20
-rw-r--r-- poky/scripts/combo-layer.conf.example | 93
-rwxr-xr-x poky/scripts/contrib/bb-perf/bb-matrix-plot.sh | 137
-rwxr-xr-x poky/scripts/contrib/bb-perf/bb-matrix.sh | 79
-rwxr-xr-x poky/scripts/contrib/bb-perf/buildstats-plot.sh | 157
-rwxr-xr-x poky/scripts/contrib/bb-perf/buildstats.sh | 155
-rwxr-xr-x poky/scripts/contrib/bbvars.py | 180
-rwxr-xr-x poky/scripts/contrib/build-perf-test-wrapper.sh | 239
-rwxr-xr-x poky/scripts/contrib/build-perf-test.sh | 400
-rwxr-xr-x poky/scripts/contrib/ddimage | 108
-rwxr-xr-x poky/scripts/contrib/devtool-stress.py | 256
-rwxr-xr-x poky/scripts/contrib/dialog-power-control | 53
-rwxr-xr-x poky/scripts/contrib/documentation-audit.sh | 94
-rwxr-xr-x poky/scripts/contrib/graph-tool | 91
-rwxr-xr-x poky/scripts/contrib/list-packageconfig-flags.py | 178
-rwxr-xr-x poky/scripts/contrib/mkefidisk.sh | 464
-rwxr-xr-x poky/scripts/contrib/oe-build-perf-report-email.py | 282
-rwxr-xr-x poky/scripts/contrib/patchreview.py | 211
-rwxr-xr-x poky/scripts/contrib/patchtest.sh | 118
-rwxr-xr-x poky/scripts/contrib/serdevtry | 60
-rwxr-xr-x poky/scripts/contrib/test_build_time.sh | 237
-rwxr-xr-x poky/scripts/contrib/test_build_time_worker.sh | 37
-rwxr-xr-x poky/scripts/contrib/uncovered | 39
-rwxr-xr-x poky/scripts/contrib/verify-homepage.py | 62
-rwxr-xr-x poky/scripts/cp-noerror | 52
-rwxr-xr-x poky/scripts/create-pull-request | 303
-rwxr-xr-x poky/scripts/crosstap | 469
-rwxr-xr-x poky/scripts/devtool | 349
-rwxr-xr-x poky/scripts/distro/build-recipe-list.py | 129
-rwxr-xr-x poky/scripts/distro/distrocompare.sh | 123
-rwxr-xr-x poky/scripts/gen-lockedsig-cache | 74
-rwxr-xr-x poky/scripts/gen-site-config | 53
-rw-r--r-- poky/scripts/lib/argparse_oe.py | 176
-rw-r--r-- poky/scripts/lib/build_perf/__init__.py | 31
-rw-r--r-- poky/scripts/lib/build_perf/html.py | 19
-rw-r--r-- poky/scripts/lib/build_perf/html/measurement_chart.html | 50
-rw-r--r-- poky/scripts/lib/build_perf/html/report.html | 286
-rw-r--r-- poky/scripts/lib/build_perf/report.py | 345
-rw-r--r-- poky/scripts/lib/build_perf/scrape-html-report.js | 56
-rw-r--r-- poky/scripts/lib/buildstats.py | 349
-rw-r--r-- poky/scripts/lib/checklayer/__init__.py | 394
-rw-r--r-- poky/scripts/lib/checklayer/case.py | 7
-rw-r--r-- poky/scripts/lib/checklayer/cases/__init__.py | 0
-rw-r--r-- poky/scripts/lib/checklayer/cases/bsp.py | 204
-rw-r--r-- poky/scripts/lib/checklayer/cases/common.py | 58
-rw-r--r-- poky/scripts/lib/checklayer/cases/distro.py | 26
-rw-r--r-- poky/scripts/lib/checklayer/context.py | 15
-rw-r--r-- poky/scripts/lib/devtool/__init__.py | 383
-rw-r--r-- poky/scripts/lib/devtool/build.py | 86
-rw-r--r-- poky/scripts/lib/devtool/build_image.py | 174
-rw-r--r-- poky/scripts/lib/devtool/build_sdk.py | 65
-rw-r--r-- poky/scripts/lib/devtool/deploy.py | 350
-rw-r--r-- poky/scripts/lib/devtool/export.py | 119
-rw-r--r-- poky/scripts/lib/devtool/import.py | 144
-rw-r--r-- poky/scripts/lib/devtool/package.py | 60
-rw-r--r-- poky/scripts/lib/devtool/runqemu.py | 74
-rw-r--r-- poky/scripts/lib/devtool/sdk.py | 336
-rw-r--r-- poky/scripts/lib/devtool/search.py | 118
-rw-r--r-- poky/scripts/lib/devtool/standard.py | 2164
-rw-r--r-- poky/scripts/lib/devtool/upgrade.py | 626
-rw-r--r-- poky/scripts/lib/devtool/utilcmds.py | 252
-rw-r--r-- poky/scripts/lib/recipetool/__init__.py | 0
-rw-r--r-- poky/scripts/lib/recipetool/append.py | 457
-rw-r--r-- poky/scripts/lib/recipetool/create.py | 1329
-rw-r--r-- poky/scripts/lib/recipetool/create_buildsys.py | 893
-rw-r--r-- poky/scripts/lib/recipetool/create_buildsys_python.py | 719
-rw-r--r-- poky/scripts/lib/recipetool/create_kernel.py | 99
-rw-r--r-- poky/scripts/lib/recipetool/create_kmod.py | 152
-rw-r--r-- poky/scripts/lib/recipetool/create_npm.py | 330
-rw-r--r-- poky/scripts/lib/recipetool/newappend.py | 89
-rw-r--r-- poky/scripts/lib/recipetool/setvar.py | 75
-rw-r--r-- poky/scripts/lib/scriptpath.py | 42
-rw-r--r-- poky/scripts/lib/scriptutils.py | 241
-rw-r--r-- poky/scripts/lib/wic/__init__.py | 20
-rw-r--r-- poky/scripts/lib/wic/canned-wks/common.wks.inc | 3
-rw-r--r-- poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg | 27
-rw-r--r-- poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks | 8
-rw-r--r-- poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks | 10
-rw-r--r-- poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks | 23
-rw-r--r-- poky/scripts/lib/wic/canned-wks/directdisk.wks | 8
-rw-r--r-- poky/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in | 3
-rw-r--r-- poky/scripts/lib/wic/canned-wks/mkefidisk.wks | 11
-rw-r--r-- poky/scripts/lib/wic/canned-wks/mkhybridiso.wks | 7
-rw-r--r-- poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks | 8
-rw-r--r-- poky/scripts/lib/wic/canned-wks/sdimage-bootpart.wks | 6
-rw-r--r-- poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks | 11
-rw-r--r-- poky/scripts/lib/wic/engine.py | 565
-rw-r--r-- poky/scripts/lib/wic/filemap.py | 600
-rw-r--r-- poky/scripts/lib/wic/help.py | 1055
-rw-r--r-- poky/scripts/lib/wic/ksparser.py | 235
-rw-r--r-- poky/scripts/lib/wic/misc.py | 263
-rw-r--r-- poky/scripts/lib/wic/partition.py | 425
-rw-r--r-- poky/scripts/lib/wic/pluginbase.py | 149
-rw-r--r-- poky/scripts/lib/wic/plugins/imager/direct.py | 607
-rw-r--r-- poky/scripts/lib/wic/plugins/source/bootimg-efi.py | 258
-rw-r--r-- poky/scripts/lib/wic/plugins/source/bootimg-partition.py | 132
-rw-r--r-- poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py | 207
-rw-r--r-- poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py | 466
-rw-r--r-- poky/scripts/lib/wic/plugins/source/rawcopy.py | 91
-rw-r--r-- poky/scripts/lib/wic/plugins/source/rootfs.py | 126
-rwxr-xr-x poky/scripts/lnr | 21
-rw-r--r-- poky/scripts/multilib_header_wrapper.h | 61
-rwxr-xr-x poky/scripts/native-intercept/chown | 2
-rwxr-xr-x poky/scripts/oe-build-perf-report | 661
-rwxr-xr-x poky/scripts/oe-build-perf-test | 223
-rwxr-xr-x poky/scripts/oe-buildenv-internal | 140
-rwxr-xr-x poky/scripts/oe-check-sstate | 121
-rwxr-xr-x poky/scripts/oe-depends-dot | 121
-rwxr-xr-x poky/scripts/oe-find-native-sysroot | 115
-rwxr-xr-x poky/scripts/oe-git-archive | 271
-rwxr-xr-x poky/scripts/oe-git-proxy | 181
-rwxr-xr-x poky/scripts/oe-gnome-terminal-phonehome | 10
-rwxr-xr-x poky/scripts/oe-pkgdata-util | 630
-rwxr-xr-x poky/scripts/oe-publish-sdk | 155
-rwxr-xr-x poky/scripts/oe-run-native | 68
-rwxr-xr-x poky/scripts/oe-selftest | 75
-rwxr-xr-x poky/scripts/oe-setup-builddir | 140
-rwxr-xr-x poky/scripts/oe-test | 81
-rwxr-xr-x poky/scripts/oe-trim-schemas | 58
-rwxr-xr-x poky/scripts/oepydevshell-internal.py | 97
-rwxr-xr-x poky/scripts/opkg-query-helper.py | 85
-rw-r--r-- poky/scripts/postinst-intercepts/delay_to_first_boot | 2
-rwxr-xr-x poky/scripts/postinst-intercepts/postinst_intercept | 56
-rw-r--r-- poky/scripts/postinst-intercepts/update_font_cache | 7
-rw-r--r-- poky/scripts/postinst-intercepts/update_gio_module_cache | 9
-rw-r--r-- poky/scripts/postinst-intercepts/update_icon_cache | 13
-rw-r--r-- poky/scripts/postinst-intercepts/update_pixbuf_cache | 11
-rw-r--r-- poky/scripts/pybootchartgui/AUTHORS | 11
-rw-r--r-- poky/scripts/pybootchartgui/COPYING | 340
-rw-r--r-- poky/scripts/pybootchartgui/MAINTAINERS | 3
-rw-r--r-- poky/scripts/pybootchartgui/NEWS | 204
-rw-r--r-- poky/scripts/pybootchartgui/README.pybootchart | 37
-rwxr-xr-x poky/scripts/pybootchartgui/pybootchartgui.py | 23
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/__init__.py | 0
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/batch.py | 46
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/draw.py | 968
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/gui.py | 350
l--------- poky/scripts/pybootchartgui/pybootchartgui/main.py | 1
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/main.py.in | 183
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/parsing.py | 821
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/process_tree.py | 292
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/samples.py | 178
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/tests/parser_test.py | 105
-rw-r--r-- poky/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py | 92
-rwxr-xr-x poky/scripts/pythondeps | 250
-rwxr-xr-x poky/scripts/recipetool | 126
-rwxr-xr-x poky/scripts/relocate_sdk.py | 266
-rw-r--r-- poky/scripts/rootfs_rpm-extract-postinst.awk | 11
-rwxr-xr-x poky/scripts/rpm2cpio.sh | 55
-rwxr-xr-x poky/scripts/runqemu | 1307
-rwxr-xr-x poky/scripts/runqemu-addptable2image | 51
-rwxr-xr-x poky/scripts/runqemu-export-rootfs | 156
-rwxr-xr-x poky/scripts/runqemu-extract-sdk | 104
-rwxr-xr-x poky/scripts/runqemu-gen-tapdevs | 108
-rwxr-xr-x poky/scripts/runqemu-ifdown | 66
-rwxr-xr-x poky/scripts/runqemu-ifup | 121
-rw-r--r-- poky/scripts/runqemu.README | 42
-rwxr-xr-x poky/scripts/send-error-report | 200
-rwxr-xr-x poky/scripts/send-pull-request | 184
-rwxr-xr-x poky/scripts/sstate-cache-management.sh | 469
-rwxr-xr-x poky/scripts/sstate-diff-machines.sh | 172
-rwxr-xr-x poky/scripts/sstate-sysroot-cruft.sh | 199
-rwxr-xr-x poky/scripts/sysroot-relativelinks.py | 31
-rwxr-xr-x poky/scripts/task-time | 132
-rwxr-xr-x poky/scripts/test-reexec | 123
-rwxr-xr-x poky/scripts/test-remote-image | 357
-rwxr-xr-x poky/scripts/tiny/dirsize.py | 89
-rwxr-xr-x poky/scripts/tiny/ksize.py | 170
-rwxr-xr-x poky/scripts/tiny/ksum.py | 168
-rwxr-xr-x poky/scripts/verify-bashisms | 155
-rwxr-xr-x poky/scripts/wic | 542
-rwxr-xr-x poky/scripts/yocto-check-layer | 208
-rwxr-xr-x poky/scripts/yocto-check-layer-wrapper | 43
180 files changed, 37373 insertions, 0 deletions
diff --git a/poky/scripts/README b/poky/scripts/README
new file mode 100644
index 000000000..1b8d12724
--- /dev/null
+++ b/poky/scripts/README
@@ -0,0 +1 @@
+This directory contains various useful scripts for working with OE builds
diff --git a/poky/scripts/bitbake-prserv-tool b/poky/scripts/bitbake-prserv-tool
new file mode 100755
index 000000000..fa31b5258
--- /dev/null
+++ b/poky/scripts/bitbake-prserv-tool
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+
+help ()
+{
+ base=`basename $0`
+ echo -e "Usage: $base command"
+ echo "Avaliable commands:"
+ echo -e "\texport <file.conf>: export and lock down the AUTOPR values from the PR service into a file for release."
+ echo -e "\timport <file.conf>: import the AUTOPR values from the exported file into the PR service."
+}
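+
+# Typical usage, run from an initialised build directory (the file name
+# below is illustrative only):
+#   bitbake-prserv-tool export my-release.conf
+#   bitbake-prserv-tool import my-release.conf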
+
+clean_cache()
+{
+ s=`bitbake -e | grep ^CACHE= | cut -f2 -d\"`
+ if [ "x${s}" != "x" ]; then
+ rm -rf ${s}
+ fi
+}
+
+do_export ()
+{
+ file=$1
+ [ "x${file}" == "x" ] && help && exit 1
+ rm -f ${file}
+
+ clean_cache
+ bitbake -R conf/prexport.conf -p
+ s=`bitbake -R conf/prexport.conf -e | grep ^PRSERV_DUMPFILE= | cut -f2 -d\"`
+ if [ "x${s}" != "x" ];
+ then
+ [ -e $s ] && mv -f $s $file && echo "Exporting to file $file succeeded!"
+ return 0
+ fi
+ echo "Exporting to file $file failed!"
+ return 1
+}
+
+do_import ()
+{
+ file=$1
+ [ "x${file}" == "x" ] && help && exit 1
+
+ clean_cache
+ bitbake -R conf/primport.conf -R $file -p
+ ret=$?
+ [ $ret -eq 0 ] && echo "Importing from file $file succeeded!" || echo "Importing from file $file failed!"
+ return $ret
+}
+
+do_migrate_localcount ()
+{
+ df=`bitbake -R conf/migrate_localcount.conf -e | \
+ grep ^LOCALCOUNT_DUMPFILE= | cut -f2 -d\"`
+ if [ "x${df}" == "x" ];
+ then
+ echo "LOCALCOUNT_DUMPFILE is not defined!"
+ return 1
+ fi
+
+ rm -rf $df
+ clean_cache
+ echo "Exporting LOCALCOUNT to AUTOINCs..."
+ bitbake -R conf/migrate_localcount.conf -p
+ [ ! $? -eq 0 ] && echo "Exporting to file $df failed!" && exit 1
+
+ if [ -e $df ];
+ then
+ echo "Exporting to file $df succeeded!"
+ else
+ echo "Exporting to file $df failed!"
+ exit 1
+ fi
+
+ echo "Importing generated AUTOINC entries..."
+ [ -e $df ] && do_import $df
+
+ if [ ! $? -eq 0 ]
+ then
+ echo "Migration from LOCALCOUNT to AUTOINCs failed!"
+ return 1
+ fi
+
+ echo "Migration from LOCALCOUNT to AUTOINCs succeeded!"
+ return 0
+}
+
+[ $# -eq 0 ] && help && exit 1
+
+# Only export and import take a file argument; validate it for those
+# commands (migrate_localcount takes no file argument).
+case $1 in
+export|import)
+ case $2 in
+ *.conf|*.inc)
+ ;;
+ *)
+ echo "ERROR: $2 must end with .conf or .inc!"
+ exit 1
+ ;;
+ esac
+ ;;
+esac
+
+case $1 in
+export)
+ do_export $2
+ ;;
+import)
+ do_import $2
+ ;;
+migrate_localcount)
+ do_migrate_localcount
+ ;;
+*)
+ help
+ exit 1
+ ;;
+esac
diff --git a/poky/scripts/bitbake-whatchanged b/poky/scripts/bitbake-whatchanged
new file mode 100755
index 000000000..0207777e6
--- /dev/null
+++ b/poky/scripts/bitbake-whatchanged
@@ -0,0 +1,330 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+
+# Copyright (c) 2013 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import os
+import sys
+import getopt
+import shutil
+import re
+import warnings
+import subprocess
+import argparse
+
+scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+
+# Figure out where is the bitbake/lib/bb since we need bb.siggen and bb.process
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
+ sys.exit(1)
+scriptpath.add_oe_lib_path()
+import argparse_oe
+
+import bb.siggen
+import bb.process
+
+# Match the stamp's filename
+# group(1): PE_PV (PE may be absent)
+# group(2): PR
+# group(3): TASK
+# group(4): HASH
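+# e.g. "1.0-r0.do_compile.<hash>" (illustrative stamp file name)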
+stamp_re = re.compile("(?P<pv>.*)-(?P<pr>r\d+)\.(?P<task>do_\w+)\.(?P<hash>[^\.]*)")
+sigdata_re = re.compile(".*\.sigdata\..*")
+
+def gen_dict(stamps):
+ """
+ Generate the dict from the stamps dir.
+ The output dict format is:
+ {fake_f: {pn: PN, pv: PV, pr: PR, task: TASK, path: PATH}}
+ Where:
+ fake_f: pv + task + hash
+ path: the path to the stamp file
+ """
+ # The member of the sub dict (A "path" will be appended below)
+ sub_mem = ("pv", "pr", "task")
+ d = {}
+ for dirpath, _, files in os.walk(stamps):
+ for f in files:
+ # The "bitbake -S" would generate ".sigdata", but no "_setscene".
+ fake_f = re.sub('_setscene.', '.', f)
+ fake_f = re.sub('.sigdata', '', fake_f)
+ subdict = {}
+ tmp = stamp_re.match(fake_f)
+ if tmp:
+ for i in sub_mem:
+ subdict[i] = tmp.group(i)
+ if len(subdict) != 0:
+ pn = os.path.basename(dirpath)
+ subdict['pn'] = pn
+ # The path will be used by os.stat() and bb.siggen
+ subdict['path'] = dirpath + "/" + f
+ fake_f = tmp.group('pv') + tmp.group('task') + tmp.group('hash')
+ d[fake_f] = subdict
+ return d
+
+# Re-construct the dict
+def recon_dict(dict_in):
+ """
+ The output dict format is:
+ {pn_task: {pv: PV, pr: PR, path: PATH}}
+ """
+ dict_out = {}
+ for k in dict_in.keys():
+ subdict = {}
+ # The key
+ pn_task = "%s_%s" % (dict_in.get(k).get('pn'), dict_in.get(k).get('task'))
+ # If more than one stamp is found, use the latest one.
+ if pn_task in dict_out:
+ full_path_pre = dict_out.get(pn_task).get('path')
+ full_path_cur = dict_in.get(k).get('path')
+ if os.stat(full_path_pre).st_mtime > os.stat(full_path_cur).st_mtime:
+ continue
+ subdict['pv'] = dict_in.get(k).get('pv')
+ subdict['pr'] = dict_in.get(k).get('pr')
+ subdict['path'] = dict_in.get(k).get('path')
+ dict_out[pn_task] = subdict
+
+ return dict_out
+
+def split_pntask(s):
+ """
+ Split pn_task into (pn, task) and return it
+ """
+ tmp = re.match("(.*)_(do_.*)", s)
+ return (tmp.group(1), tmp.group(2))
+
+
+def print_added(d_new = None, d_old = None):
+ """
+ Print the newly added tasks
+ """
+ added = {}
+ for k in list(d_new.keys()):
+ if k not in d_old:
+ # Add the new one to added dict, and remove it from
+ # d_new, so the remaining ones are the changed ones
+ added[k] = d_new.get(k)
+ del(d_new[k])
+
+ if not added:
+ return 0
+
+ # Format the output, the dict format is:
+ # {pn: task1, task2 ...}
+ added_format = {}
+ counter = 0
+ for k in added.keys():
+ pn, task = split_pntask(k)
+ if pn in added_format:
+ # Append the value
+ added_format[pn] = "%s %s" % (added_format.get(pn), task)
+ else:
+ added_format[pn] = task
+ counter += 1
+ print("=== Newly added tasks: (%s tasks)" % counter)
+ for k in added_format.keys():
+ print(" %s: %s" % (k, added_format.get(k)))
+
+ return counter
+
+def print_vrchanged(d_new = None, d_old = None, vr = None):
+ """
+ Print the pv or pr changed tasks.
+ The arg "vr" is "pv" or "pr"
+ """
+ pvchanged = {}
+ counter = 0
+ for k in list(d_new.keys()):
+ if d_new.get(k).get(vr) != d_old.get(k).get(vr):
+ counter += 1
+ pn, task = split_pntask(k)
+ if pn not in pvchanged:
+ # Format the output, we only print pn (no task) since
+ # all the tasks would be changed when pn or pr changed,
+ # the dict format is:
+ # {pn: pv/pr_old -> pv/pr_new}
+ pvchanged[pn] = "%s -> %s" % (d_old.get(k).get(vr), d_new.get(k).get(vr))
+ del(d_new[k])
+
+ if not pvchanged:
+ return 0
+
+ print("\n=== %s changed: (%s tasks)" % (vr.upper(), counter))
+ for k in pvchanged.keys():
+ print(" %s: %s" % (k, pvchanged.get(k)))
+
+ return counter
+
+def print_depchanged(d_new = None, d_old = None, verbose = False):
+ """
+ Print the dependency changes
+ """
+ depchanged = {}
+ counter = 0
+ for k in d_new.keys():
+ counter += 1
+ pn, task = split_pntask(k)
+ if (verbose):
+ full_path_old = d_old.get(k).get("path")
+ full_path_new = d_new.get(k).get("path")
+ # No counter since it is not ready here
+ if sigdata_re.match(full_path_old) and sigdata_re.match(full_path_new):
+ output = bb.siggen.compare_sigfiles(full_path_old, full_path_new)
+ if output:
+ print("\n=== The verbose changes of %s.%s:" % (pn, task))
+ print('\n'.join(output))
+ else:
+ # Format the output, the format is:
+ # {pn: task1, task2, ...}
+ if pn in depchanged:
+ depchanged[pn] = "%s %s" % (depchanged.get(pn), task)
+ else:
+ depchanged[pn] = task
+
+ if len(depchanged) > 0:
+ print("\n=== Dependencies changed: (%s tasks)" % counter)
+ for k in depchanged.keys():
+ print(" %s: %s" % (k, depchanged[k]))
+
+ return counter
+
+
+def main():
+ """
+ Print what will be done between the current and last builds:
+ 1) Run "STAMPS_DIR=<path> bitbake -S recipe" to re-generate the stamps
+ 2) Figure out which tasks are newly added or changed; removed tasks
+ can't be determined since we can't know exactly which stamps
+ the previous build used (for example, when there have been several
+ builds).
+ 3) Use bb.siggen.compare_sigfiles to diff the old and new stamps
+ """
+
+ parser = argparse_oe.ArgumentParser(usage = """%(prog)s [options] [package ...]
+print what will be done between the current and last builds, for example:
+
+ $ bitbake core-image-sato
+ # Edit the recipes
+ $ bitbake-whatchanged core-image-sato
+
+The changes will be printed.
+
+Note:
+ The number of tasks is not accurate when the task is "do_build" since
+ it usually depends on other tasks.
+ "nostamp" tasks are not included.
+"""
+)
+ parser.add_argument("recipe", help="recipe to check")
+ parser.add_argument("-v", "--verbose", help = "print the verbose changes", action = "store_true")
+ args = parser.parse_args()
+
+ # Get the STAMPS_DIR
+ print("Figuring out the STAMPS_DIR ...")
+ cmdline = "bitbake -e | sed -ne 's/^STAMPS_DIR=\"\(.*\)\"/\\1/p'"
+ stampsdir, err = bb.process.run(cmdline)
+ if not stampsdir:
+ print("ERROR: No STAMPS_DIR found for '%s'" % args.recipe, file=sys.stderr)
+ return 2
+ stampsdir = stampsdir.rstrip("\n")
+ if not os.path.isdir(stampsdir):
+ print("ERROR: stamps directory \"%s\" not found!" % stampsdir, file=sys.stderr)
+ return 2
+
+ # The new stamps dir
+ new_stampsdir = stampsdir + ".bbs"
+ if os.path.exists(new_stampsdir):
+ print("ERROR: %s already exists!" % new_stampsdir, file=sys.stderr)
+ return 2
+
+ try:
+ # Generate the new stamps dir
+ print("Generating the new stamps ... (need several minutes)")
+ cmdline = "STAMPS_DIR=%s bitbake -S none %s" % (new_stampsdir, args.recipe)
+ # FIXME
+ # The "bitbake -S" may fail, not fatal error, the stamps will still
+ # be generated, this might be a bug of "bitbake -S".
+ try:
+ bb.process.run(cmdline)
+ except Exception as exc:
+ print(exc)
+
+ # The dict for the new and old stamps.
+ old_dict = gen_dict(stampsdir)
+ new_dict = gen_dict(new_stampsdir)
+
+ # Remove the same one from both stamps.
+ cnt_unchanged = 0
+ for k in list(new_dict.keys()):
+ if k in old_dict:
+ cnt_unchanged += 1
+ del(new_dict[k])
+ del(old_dict[k])
+
+ # Re-construct the dict to easily find out what is added or changed.
+ # The dict format is:
+ # {pn_task: {pv: PV, pr: PR, path: PATH}}
+ new_recon = recon_dict(new_dict)
+ old_recon = recon_dict(old_dict)
+
+ del new_dict
+ del old_dict
+
+ # Figure out what are changed, the new_recon would be changed
+ # by the print_xxx function.
+ # Newly added
+ cnt_added = print_added(new_recon, old_recon)
+
+ # PV (including PE) and PR changed
+ # Let the bb.siggen handle them if verbose
+ cnt_rv = {}
+ if not args.verbose:
+ for i in ('pv', 'pr'):
+ cnt_rv[i] = print_vrchanged(new_recon, old_recon, i)
+
+ # Dependencies changed (use bitbake-diffsigs)
+ cnt_dep = print_depchanged(new_recon, old_recon, args.verbose)
+
+ total_changed = cnt_added + (cnt_rv.get('pv') or 0) + (cnt_rv.get('pr') or 0) + cnt_dep
+
+ print("\n=== Summary: (%s changed, %s unchanged)" % (total_changed, cnt_unchanged))
+ if args.verbose:
+ print("Newly added: %s\nDependencies changed: %s\n" % \
+ (cnt_added, cnt_dep))
+ else:
+ print("Newly added: %s\nPV changed: %s\nPR changed: %s\nDependencies changed: %s\n" % \
+ (cnt_added, cnt_rv.get('pv') or 0, cnt_rv.get('pr') or 0, cnt_dep))
+ except:
+ print("ERROR occurred!")
+ raise
+ finally:
+ # Remove the newly generated stamps dir
+ if os.path.exists(new_stampsdir):
+ print("Removing the newly generated stamps dir ...")
+ shutil.rmtree(new_stampsdir)
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/poky/scripts/buildhistory-collect-srcrevs b/poky/scripts/buildhistory-collect-srcrevs
new file mode 100755
index 000000000..d375b045d
--- /dev/null
+++ b/poky/scripts/buildhistory-collect-srcrevs
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+#
+# Collects the recorded SRCREV values from buildhistory and reports on them
+#
+# Copyright 2013 Intel Corporation
+# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import collections
+import os
+import sys
+import optparse
+import logging
+
+def logger_create():
+ logger = logging.getLogger("buildhistory")
+ loggerhandler = logging.StreamHandler()
+ loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ logger.addHandler(loggerhandler)
+ logger.setLevel(logging.INFO)
+ return logger
+
+logger = logger_create()
+
+def main():
+ parser = optparse.OptionParser(
+ description = "Collects the recorded SRCREV values from buildhistory and reports on them.",
+ usage = """
+ %prog [options]""")
+
+ parser.add_option("-a", "--report-all",
+ help = "Report all SRCREV values, not just ones where AUTOREV has been used",
+ action="store_true", dest="reportall")
+ parser.add_option("-f", "--forcevariable",
+ help = "Use forcevariable override for all output lines",
+ action="store_true", dest="forcevariable")
+ parser.add_option("-p", "--buildhistory-dir",
+ help = "Specify path to buildhistory directory (defaults to buildhistory/ under cwd)",
+ action="store", dest="buildhistory_dir", default='buildhistory/')
+
+ options, args = parser.parse_args(sys.argv)
+
+ if len(args) > 1:
+ sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args[1:]))
+ parser.print_help()
+ sys.exit(1)
+
+ if not os.path.exists(options.buildhistory_dir):
+ sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % options.buildhistory_dir)
+ parser.print_help()
+ sys.exit(1)
+
+ if options.forcevariable:
+ forcevariable = '_forcevariable'
+ else:
+ forcevariable = ''
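+
+ # The override suffix makes the emitted assignments take precedence over
+ # any other setting of SRCREV, e.g. (hypothetical output line):
+ #   SRCREV_pn-foo_forcevariable = "abc123"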
+
+ all_srcrevs = collections.defaultdict(list)
+ for root, dirs, files in os.walk(options.buildhistory_dir):
+ if '.git' in dirs:
+ dirs.remove('.git')
+ for fn in files:
+ if fn == 'latest_srcrev':
+ curdir = os.path.basename(os.path.dirname(root))
+ fullpath = os.path.join(root, fn)
+ pn = os.path.basename(root)
+ srcrev = None
+ orig_srcrev = None
+ orig_srcrevs = {}
+ srcrevs = {}
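+ # A latest_srcrev file contains lines such as (illustrative):
+ #   # SRCREV = "<revision recorded in the recipe>"
+ #   SRCREV = "<revision actually used by the build>"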
+ with open(fullpath) as f:
+ for line in f:
+ if '=' in line:
+ splitval = line.split('=')
+ value = splitval[1].strip('" \t\n\r')
+ if line.startswith('# SRCREV = '):
+ orig_srcrev = value
+ elif line.startswith('# SRCREV_'):
+ splitval = line.split('=')
+ name = splitval[0].split('_')[1].strip()
+ orig_srcrevs[name] = value
+ elif line.startswith('SRCREV ='):
+ srcrev = value
+ elif line.startswith('SRCREV_'):
+ name = splitval[0].split('_')[1].strip()
+ srcrevs[name] = value
+ if srcrev and (options.reportall or srcrev != orig_srcrev):
+ all_srcrevs[curdir].append((pn, None, srcrev))
+ for name, value in srcrevs.items():
+ orig = orig_srcrevs.get(name, orig_srcrev)
+ if options.reportall or value != orig:
+ all_srcrevs[curdir].append((pn, name, value))
+
+ for curdir, srcrevs in sorted(all_srcrevs.items()):
+ if srcrevs:
+ print('# %s' % curdir)
+ for pn, name, srcrev in srcrevs:
+ if name:
+ print('SRCREV_%s_pn-%s%s = "%s"' % (name, pn, forcevariable, srcrev))
+ else:
+ print('SRCREV_pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/buildhistory-diff b/poky/scripts/buildhistory-diff
new file mode 100755
index 000000000..70805b067
--- /dev/null
+++ b/poky/scripts/buildhistory-diff
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+
+# Report significant differences in the buildhistory repository since a specific revision
+#
+# Copyright (C) 2013 Intel Corporation
+# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+
+import sys
+import os
+import argparse
+from distutils.version import LooseVersion
+
+# Ensure GitPython is installed (buildhistory_analysis needs it)
+try:
+ import git
+except ImportError:
+ print("Please install GitPython (python3-git) 0.3.4 or later in order to use this script")
+ sys.exit(1)
+
+def get_args_parser():
+ description = "Reports significant differences in the buildhistory repository."
+
+ parser = argparse.ArgumentParser(description=description,
+ usage="""
+ %(prog)s [options] [from-revision [to-revision]]
+ (if not specified, from-revision defaults to build-minus-1, and to-revision defaults to HEAD)""")
+
+ parser.add_argument('-p', '--buildhistory-dir',
+ action='store',
+ dest='buildhistory_dir',
+ default='buildhistory/',
+ help="Specify path to buildhistory directory (defaults to buildhistory/ under cwd)")
+ parser.add_argument('-v', '--report-version',
+ action='store_true',
+ dest='report_ver',
+ default=False,
+ help="Report changes in PKGE/PKGV/PKGR even when the values are still the default (PE/PV/PR)")
+ parser.add_argument('-a', '--report-all',
+ action='store_true',
+ dest='report_all',
+ default=False,
+ help="Report all changes, not just the default significant ones")
+ parser.add_argument('-s', '--signatures',
+ action='store_true',
+ dest='sigs',
+ default=False,
+ help="Report list of signatures differing instead of output")
+ parser.add_argument('-S', '--signatures-with-diff',
+ action='store_true',
+ dest='sigsdiff',
+ default=False,
+ help="Report on actual signature differences instead of output (requires signature data to have been generated, either by running the actual tasks or using bitbake -S)")
+ parser.add_argument('-e', '--exclude-path',
+ action='append',
+ help="Exclude path from the output")
+ parser.add_argument('-c', '--colour',
+ choices=('yes', 'no', 'auto'),
+ default="auto",
+ help="Whether to colourise (defaults to auto)")
+ parser.add_argument('revisions',
+ default = ['build-minus-1', 'HEAD'],
+ nargs='*',
+ help=argparse.SUPPRESS)
+ return parser
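+
+# Typical invocations (revisions are illustrative):
+#   buildhistory-diff                 # compare build-minus-1 against HEAD
+#   buildhistory-diff HEAD~5 HEAD
+#   buildhistory-diff HEAD~5..HEAD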
+
+def main():
+
+ parser = get_args_parser()
+ args = parser.parse_args()
+
+ if LooseVersion(git.__version__) < '0.3.1':
+ sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n")
+ sys.exit(1)
+
+ if len(args.revisions) > 2:
+ sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args.revisions[2:]))
+ parser.print_help()
+ sys.exit(1)
+
+ if not os.path.exists(args.buildhistory_dir):
+ if args.buildhistory_dir == 'buildhistory/':
+ cwd = os.getcwd()
+ if os.path.basename(cwd) == 'buildhistory':
+ args.buildhistory_dir = cwd
+
+ if not os.path.exists(args.buildhistory_dir):
+ sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % args.buildhistory_dir)
+ parser.print_help()
+ sys.exit(1)
+
+ scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
+ lib_path = scripts_path + '/lib'
+ sys.path = sys.path + [lib_path]
+
+ import scriptpath
+
+ # Set path to OE lib dir so we can import the buildhistory_analysis module
+ scriptpath.add_oe_lib_path()
+ # Set path to bitbake lib dir so the buildhistory_analysis module can load bb.utils
+ bitbakepath = scriptpath.add_bitbake_lib_path()
+
+ if not bitbakepath:
+ sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
+ sys.exit(1)
+
+ if len(args.revisions) == 1:
+ if '..' in args.revisions[0]:
+ fromrev, torev = args.revisions[0].split('..')
+ else:
+ fromrev, torev = args.revisions[0], 'HEAD'
+ elif len(args.revisions) == 2:
+ fromrev, torev = args.revisions
+
+ from oe.buildhistory_analysis import init_colours, process_changes
+ import gitdb
+
+ init_colours({"yes": True, "no": False, "auto": sys.stdout.isatty()}[args.colour])
+
+ try:
+ changes = process_changes(args.buildhistory_dir, fromrev, torev,
+ args.report_all, args.report_ver, args.sigs,
+ args.sigsdiff, args.exclude_path)
+ except gitdb.exc.BadObject as e:
+ if not args.revisions:
+ sys.stderr.write("Unable to find previous build revision in buildhistory repository\n\n")
+ parser.print_help()
+ else:
+ sys.stderr.write('Specified git revision "%s" is not valid\n' % e.args[0])
+ sys.exit(1)
+
+ for chg in changes:
+ out = str(chg)
+ if out:
+ print(out)
+
+ sys.exit(0)
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/buildstats-diff b/poky/scripts/buildstats-diff
new file mode 100755
index 000000000..a128dd324
--- /dev/null
+++ b/poky/scripts/buildstats-diff
@@ -0,0 +1,306 @@
+#!/usr/bin/python3
+#
+# Script for comparing buildstats from two different builds
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import argparse
+import glob
+import logging
+import math
+import os
+import sys
+from operator import attrgetter
+
+# Import oe libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+from buildstats import BuildStats, diff_buildstats, taskdiff_fields, BSVerDiff
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger()
+
+
+class ScriptError(Exception):
+ """Exception for internal error handling of this script"""
+ pass
+
+
+def read_buildstats(path, multi):
+ """Read buildstats"""
+ if not os.path.exists(path):
+ raise ScriptError("No such file or directory: {}".format(path))
+
+ if os.path.isfile(path):
+ return BuildStats.from_file_json(path)
+
+ if os.path.isfile(os.path.join(path, 'build_stats')):
+ return BuildStats.from_dir(path)
+
+ # Handle a non-buildstat directory
+ subpaths = sorted(glob.glob(path + '/*'))
+ if len(subpaths) > 1:
+ if multi:
+ log.info("Averaging over {} buildstats from {}".format(
+ len(subpaths), path))
+ else:
+ raise ScriptError("Multiple buildstats found in '{}'. Please give "
+ "a single buildstat directory of use the --multi "
+ "option".format(path))
+ bs = None
+ for subpath in subpaths:
+ if os.path.isfile(subpath):
+ _bs = BuildStats.from_file_json(subpath)
+ else:
+ _bs = BuildStats.from_dir(subpath)
+ if bs is None:
+ bs = _bs
+ else:
+ bs.aggregate(_bs)
+ if not bs:
+ raise ScriptError("No buildstats found under {}".format(path))
+
+ return bs
+
+
+def print_ver_diff(bs1, bs2):
+ """Print package version differences"""
+
+ diff = BSVerDiff(bs1, bs2)
+
+ maxlen = max([len(r) for r in set(bs1.keys()).union(set(bs2.keys()))])
+ fmt_str = " {:{maxlen}} ({})"
+
+ if diff.new:
+ print("\nNEW RECIPES:")
+ print("------------")
+ for name, val in sorted(diff.new.items()):
+ print(fmt_str.format(name, val.nevr, maxlen=maxlen))
+
+ if diff.dropped:
+ print("\nDROPPED RECIPES:")
+ print("----------------")
+ for name, val in sorted(diff.dropped.items()):
+ print(fmt_str.format(name, val.nevr, maxlen=maxlen))
+
+ fmt_str = " {0:{maxlen}} {1:<20} ({2})"
+ if diff.rchanged:
+ print("\nREVISION CHANGED:")
+ print("-----------------")
+ for name, val in sorted(diff.rchanged.items()):
+ field1 = "{} -> {}".format(val.left.revision, val.right.revision)
+ field2 = "{} -> {}".format(val.left.nevr, val.right.nevr)
+ print(fmt_str.format(name, field1, field2, maxlen=maxlen))
+
+ if diff.vchanged:
+ print("\nVERSION CHANGED:")
+ print("----------------")
+ for name, val in sorted(diff.vchanged.items()):
+ field1 = "{} -> {}".format(val.left.version, val.right.version)
+ field2 = "{} -> {}".format(val.left.nevr, val.right.nevr)
+ print(fmt_str.format(name, field1, field2, maxlen=maxlen))
+
+ if diff.echanged:
+ print("\nEPOCH CHANGED:")
+ print("--------------")
+ for name, val in sorted(diff.echanged.items()):
+ field1 = "{} -> {}".format(val.left.epoch, val.right.epoch)
+ field2 = "{} -> {}".format(val.left.nevr, val.right.nevr)
+ print(fmt_str.format(name, field1, field2, maxlen=maxlen))
+
+
+def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',)):
+ """Diff task execution times"""
+ def val_to_str(val, human_readable=False):
+ """Convert raw value to printable string"""
+ def hms_time(secs):
+ """Get time in human-readable HH:MM:SS format"""
+ h = int(secs / 3600)
+ m = int((secs % 3600) / 60)
+ s = secs % 60
+ if h == 0:
+ return "{:02d}:{:04.1f}".format(m, s)
+ else:
+ return "{:d}:{:02d}:{:04.1f}".format(h, m, s)
+
+ if 'time' in val_type:
+ if human_readable:
+ return hms_time(val)
+ else:
+ return "{:.1f}s".format(val)
+ elif 'bytes' in val_type and human_readable:
+ prefix = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi']
+ dec = int(math.log(val, 2) / 10)
+ prec = 1 if dec > 0 else 0
+ return "{:.{prec}f}{}B".format(val / (2 ** (10 * dec)),
+ prefix[dec], prec=prec)
+ elif 'ops' in val_type and human_readable:
+ prefix = ['', 'k', 'M', 'G', 'T', 'P']
+ dec = int(math.log(val, 1000))
+ prec = 1 if dec > 0 else 0
+ return "{:.{prec}f}{}ops".format(val / (1000 ** dec),
+ prefix[dec], prec=prec)
+ return str(int(val))
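+
+ # Illustrative conversions (hypothetical inputs):
+ #   with val_type "cputime":    val_to_str(65.0, True) -> "01:05.0"
+ #   with val_type "read_bytes": val_to_str(1536, True) -> "1.5KiB"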
+
+ def sum_vals(buildstats):
+ """Get cumulative sum of all tasks"""
+ total = 0.0
+ for recipe_data in buildstats.values():
+ for bs_task in recipe_data.tasks.values():
+ total += getattr(bs_task, val_type)
+ return total
+
+ if min_val:
+ print("Ignoring tasks less than {} ({})".format(
+ val_to_str(min_val, True), val_to_str(min_val)))
+ if min_absdiff:
+ print("Ignoring differences less than {} ({})".format(
+ val_to_str(min_absdiff, True), val_to_str(min_absdiff)))
+
+ # Prepare the data
+ tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff)
+
+ # Sort our list
+ for field in reversed(sort_by):
+ if field.startswith('-'):
+ field = field[1:]
+ reverse = True
+ else:
+ reverse = False
+ tasks_diff = sorted(tasks_diff, key=attrgetter(field), reverse=reverse)
+
+ linedata = [(' ', 'PKG', ' ', 'TASK', 'ABSDIFF', 'RELDIFF',
+ val_type.upper() + '1', val_type.upper() + '2')]
+ field_lens = dict([('len_{}'.format(i), len(f)) for i, f in enumerate(linedata[0])])
+
+ # Prepare fields in string format and measure field lengths
+ for diff in tasks_diff:
+ task_prefix = diff.task_op if diff.pkg_op == ' ' else ' '
+ linedata.append((diff.pkg_op, diff.pkg, task_prefix, diff.task,
+ val_to_str(diff.absdiff),
+ '{:+.1f}%'.format(diff.reldiff),
+ val_to_str(diff.value1),
+ val_to_str(diff.value2)))
+ for i, field in enumerate(linedata[-1]):
+ key = 'len_{}'.format(i)
+ if len(field) > field_lens[key]:
+ field_lens[key] = len(field)
+
+ # Print data
+ print()
+ for fields in linedata:
+ print("{:{len_0}}{:{len_1}} {:{len_2}}{:{len_3}} {:>{len_4}} {:>{len_5}} {:>{len_6}} -> {:{len_7}}".format(
+ *fields, **field_lens))
+
+ # Print summary of the diffs
+ total1 = sum_vals(bs1)
+ total2 = sum_vals(bs2)
+ print("\nCumulative {}:".format(val_type))
+ print (" {} {:+.1f}% {} ({}) -> {} ({})".format(
+ val_to_str(total2 - total1), 100 * (total2-total1) / total1,
+ val_to_str(total1, True), val_to_str(total1),
+ val_to_str(total2, True), val_to_str(total2)))
+
+
+def parse_args(argv):
+ """Parse cmdline arguments"""
+ description="""
+Script for comparing buildstats of two separate builds."""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description=description)
+
+ min_val_defaults = {'cputime': 3.0,
+ 'read_bytes': 524288,
+ 'write_bytes': 524288,
+ 'read_ops': 500,
+ 'write_ops': 500,
+ 'walltime': 5}
+ min_absdiff_defaults = {'cputime': 1.0,
+ 'read_bytes': 131072,
+ 'write_bytes': 131072,
+ 'read_ops': 50,
+ 'write_ops': 50,
+ 'walltime': 2}
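+
+ # The dicts above act as sentinel defaults: when the user does not pass
+ # --min-val/--min-absdiff, parse_args() swaps in the per-attribute value
+ # for the chosen --diff-attr (see the identity checks below).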
+
+ parser.add_argument('--debug', '-d', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--ver-diff', action='store_true',
+ help="Show package version differences and exit")
+ parser.add_argument('--diff-attr', default='cputime',
+ choices=min_val_defaults.keys(),
+ help="Buildstat attribute which to compare")
+ parser.add_argument('--min-val', default=min_val_defaults, type=float,
+ help="Filter out tasks less than MIN_VAL. "
+ "Default depends on --diff-attr.")
+ parser.add_argument('--min-absdiff', default=min_absdiff_defaults, type=float,
+ help="Filter out tasks whose difference is less than "
+ "MIN_ABSDIFF, Default depends on --diff-attr.")
+ parser.add_argument('--sort-by', default='absdiff',
+ help="Comma-separated list of field sort order. "
+ "Prepend the field name with '-' for reversed sort. "
+ "Available fields are: {}".format(', '.join(taskdiff_fields)))
+ parser.add_argument('--multi', action='store_true',
+ help="Read all buildstats from the given paths and "
+ "average over them")
+ parser.add_argument('buildstats1', metavar='BUILDSTATS1', help="'Left' buildstat")
+ parser.add_argument('buildstats2', metavar='BUILDSTATS2', help="'Right' buildstat")
+
+ args = parser.parse_args(argv)
+
+ # We do not need/want to read all buildstats if we just want to look at the
+ # package versions
+ if args.ver_diff:
+ args.multi = False
+
+ # Handle defaults for the filter arguments
+ if args.min_val is min_val_defaults:
+ args.min_val = min_val_defaults[args.diff_attr]
+ if args.min_absdiff is min_absdiff_defaults:
+ args.min_absdiff = min_absdiff_defaults[args.diff_attr]
+
+ return args
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ # Validate sort fields
+ sort_by = []
+ for field in args.sort_by.split(','):
+ if field.lstrip('-') not in taskdiff_fields:
+ log.error("Invalid sort field '%s' (must be one of: %s)" %
+ (field, ', '.join(taskdiff_fields)))
+ sys.exit(1)
+ sort_by.append(field)
+
+ try:
+ bs1 = read_buildstats(args.buildstats1, args.multi)
+ bs2 = read_buildstats(args.buildstats2, args.multi)
+
+ if args.ver_diff:
+ print_ver_diff(bs1, bs2)
+ else:
+ print_task_diff(bs1, bs2, args.diff_attr, args.min_val,
+ args.min_absdiff, sort_by)
+ except ScriptError as err:
+ log.error(str(err))
+ return 1
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/poky/scripts/combo-layer b/poky/scripts/combo-layer
new file mode 100755
index 000000000..d04d88b07
--- /dev/null
+++ b/poky/scripts/combo-layer
@@ -0,0 +1,1376 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright 2011 Intel Corporation
+# Authored-by: Yu Ke <ke.yu@intel.com>
+# Paul Eggleton <paul.eggleton@intel.com>
+# Richard Purdie <richard.purdie@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import fnmatch
+import os, sys
+import optparse
+import logging
+import subprocess
+import tempfile
+import configparser
+import re
+import copy
+import pipes
+import shutil
+from collections import OrderedDict
+from string import Template
+from functools import reduce
+
+__version__ = "0.2.1"
+
+def logger_create():
+ logger = logging.getLogger("")
+ loggerhandler = logging.StreamHandler()
+ loggerhandler.setFormatter(logging.Formatter("[%(asctime)s] %(message)s","%H:%M:%S"))
+ logger.addHandler(loggerhandler)
+ logger.setLevel(logging.INFO)
+ return logger
+
+logger = logger_create()
+
+def get_current_branch(repodir=None):
+ try:
+ if not os.path.exists(os.path.join(repodir if repodir else '', ".git")):
+ # Repo not created yet (i.e. during init) so just assume master
+ return "master"
+ branchname = runcmd("git symbolic-ref HEAD 2>/dev/null", repodir).strip()
+ if branchname.startswith("refs/heads/"):
+ branchname = branchname[11:]
+ return branchname
+ except subprocess.CalledProcessError:
+ return ""
+
+class Configuration(object):
+ """
+ Manages the configuration
+
+ For an example config file, see combo-layer.conf.example
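+
+ A minimal component section might look like this (illustrative values):
+     [bitbake]
+     src_uri = git://example.com/bitbake
+     local_repo_dir = /path/to/local/bitbake
+     dest_dir = bitbake
+     last_revision = <sha1>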
+
+ """
+ def __init__(self, options):
+ for key, val in options.__dict__.items():
+ setattr(self, key, val)
+
+ def readsection(parser, section, repo):
+ for (name, value) in parser.items(section):
+ if value.startswith("@"):
+ self.repos[repo][name] = eval(value.strip("@"))
+ else:
+ # Apply special type transformations for some properties.
+ # Type matches the RawConfigParser.get*() methods.
+ types = {'signoff': 'boolean', 'update': 'boolean', 'history': 'boolean'}
+ if name in types:
+ value = getattr(parser, 'get' + types[name])(section, name)
+ self.repos[repo][name] = value
+
+ def readglobalsection(parser, section):
+ for (name, value) in parser.items(section):
+ if name == "commit_msg":
+ self.commit_msg_template = value
+
+ logger.debug("Loading config file %s" % self.conffile)
+ self.parser = configparser.ConfigParser()
+ with open(self.conffile) as f:
+ self.parser.readfp(f)
+
+ # initialize default values
+ self.commit_msg_template = "Automatic commit to update last_revision"
+
+ self.repos = {}
+ for repo in self.parser.sections():
+ if repo == "combo-layer-settings":
+ # special handling for global settings
+ readglobalsection(self.parser, repo)
+ else:
+ self.repos[repo] = {}
+ readsection(self.parser, repo, repo)
+
+ # Load local configuration, if available
+ self.localconffile = None
+ self.localparser = None
+ self.combobranch = None
+ if self.conffile.endswith('.conf'):
+ lcfile = self.conffile.replace('.conf', '-local.conf')
+ if os.path.exists(lcfile):
+ # Read combo layer branch
+ self.combobranch = get_current_branch()
+ logger.debug("Combo layer branch is %s" % self.combobranch)
+
+ self.localconffile = lcfile
+ logger.debug("Loading local config file %s" % self.localconffile)
+ self.localparser = configparser.ConfigParser()
+ with open(self.localconffile) as f:
+ self.localparser.readfp(f)
+
+ for section in self.localparser.sections():
+ if '|' in section:
+ sectionvals = section.split('|')
+ repo = sectionvals[0]
+ if sectionvals[1] != self.combobranch:
+ continue
+ else:
+ repo = section
+ if repo in self.repos:
+ readsection(self.localparser, section, repo)
+
+ def update(self, repo, option, value, initmode=False):
+ # If the main config has the option already, that is what we
+ # are expected to modify.
+ if self.localparser and not self.parser.has_option(repo, option):
+ parser = self.localparser
+ section = "%s|%s" % (repo, self.combobranch)
+ conffile = self.localconffile
+ if initmode and not parser.has_section(section):
+ parser.add_section(section)
+ else:
+ parser = self.parser
+ section = repo
+ conffile = self.conffile
+ parser.set(section, option, value)
+ with open(conffile, "w") as f:
+ parser.write(f)
+ self.repos[repo][option] = value
+
+ def sanity_check(self, initmode=False):
+ required_options=["src_uri", "local_repo_dir", "dest_dir", "last_revision"]
+ if initmode:
+ required_options.remove("last_revision")
+ msg = ""
+ missing_options = []
+ for name in self.repos:
+ for option in required_options:
+ if option not in self.repos[name]:
+ msg = "%s\nOption %s is not defined for component %s" %(msg, option, name)
+ missing_options.append(option)
+ # Sanitize dest_dir so that we do not have to deal with edge cases
+ # (unset, empty string, double slashes) in the rest of the code.
+ # It not being set will still be flagged as error because it is
+ # listed as required option above; that could be changed now.
+ dest_dir = os.path.normpath(self.repos[name].get("dest_dir", "."))
+ self.repos[name]["dest_dir"] = "." if not dest_dir else dest_dir
+ if msg != "":
+ logger.error("configuration file %s has the following error: %s" % (self.conffile,msg))
+ if self.localconffile and 'last_revision' in missing_options:
+ logger.error("local configuration file %s may be missing configuration for combo branch %s" % (self.localconffile, self.combobranch))
+ sys.exit(1)
+
+ # filterdiff is required by action_splitpatch, so check its availability
+ if subprocess.call("which filterdiff > /dev/null 2>&1", shell=True) != 0:
+ logger.error("ERROR: patchutils package is missing, please install it (e.g. # apt-get install patchutils)")
+ sys.exit(1)
+
+def runcmd(cmd,destdir=None,printerr=True,out=None,env=None):
+ """
+ Execute a command; raise CalledProcessError on failure,
+ return the output on success
+ """
+ logger.debug("run cmd '%s' in %s" % (cmd, os.getcwd() if destdir is None else destdir))
+ if not out:
+ out = tempfile.TemporaryFile()
+ err = out
+ else:
+ err = tempfile.TemporaryFile()
+ try:
+ subprocess.check_call(cmd, stdout=out, stderr=err, cwd=destdir, shell=isinstance(cmd, str), env=env or os.environ)
+ except subprocess.CalledProcessError as e:
+ err.seek(0)
+ if printerr:
+ logger.error("%s" % err.read())
+ raise e
+
+ err.seek(0)
+ output = err.read().decode('utf-8')
+ logger.debug("output: %s" % output.replace(chr(0), '\\0'))
+ return output
+
+def action_init(conf, args):
+ """
+ Clone component repositories
+ Check git is initialised; if not, copy initial data from component repos
+ """
+ for name in conf.repos:
+ ldir = conf.repos[name]['local_repo_dir']
+ if not os.path.exists(ldir):
+ logger.info("cloning %s to %s" %(conf.repos[name]['src_uri'], ldir))
+ subprocess.check_call("git clone %s %s" % (conf.repos[name]['src_uri'], ldir), shell=True)
+ if not os.path.exists(".git"):
+ runcmd("git init")
+ if conf.history:
+ # Need a common ref for all trees.
+ runcmd('git commit -m "initial empty commit" --allow-empty')
+ startrev = runcmd('git rev-parse master').strip()
+
+ for name in conf.repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ branch = repo.get('branch', "master")
+ lastrev = repo.get('last_revision', None)
+ if lastrev and lastrev != "HEAD":
+ initialrev = lastrev
+ if branch:
+ if not check_rev_branch(name, ldir, lastrev, branch):
+ sys.exit(1)
+ logger.info("Copying data from %s at specified revision %s..." % (name, lastrev))
+ else:
+ lastrev = None
+ initialrev = branch
+ logger.info("Copying data from %s..." % name)
+ # Sanity check initialrev and turn it into hash (required for copying history,
+ # because resolving a name ref only works in the component repo).
+ rev = runcmd('git rev-parse %s' % initialrev, ldir).strip()
+ if rev != initialrev:
+ try:
+ refs = runcmd('git show-ref -s %s' % initialrev, ldir).split('\n')
+ if len(set(refs)) > 1:
+ # Happens for example when configured to track
+ # "master" and there is a refs/heads/master. The
+ # traditional behavior from "git archive" (preserved
+ # here) is to choose the first one. This might not be
+ # intended, so at least warn about it.
+ logger.warn("%s: initial revision '%s' not unique, picking result of rev-parse = %s" %
+ (name, initialrev, refs[0]))
+ initialrev = rev
+ except:
+ # show-ref fails for hashes. Skip the sanity warning in that case.
+ pass
+ initialrev = rev
+ dest_dir = repo['dest_dir']
+ if dest_dir != ".":
+ extract_dir = os.path.join(os.getcwd(), dest_dir)
+ if not os.path.exists(extract_dir):
+ os.makedirs(extract_dir)
+ else:
+ extract_dir = os.getcwd()
+ file_filter = repo.get('file_filter', "")
+ exclude_patterns = repo.get('file_exclude', '').split()
+ def copy_selected_files(initialrev, extract_dir, file_filter, exclude_patterns, ldir,
+ subdir=""):
+ # When working inside a filtered branch which had the
+ # files already moved, we need to prepend the
+ # subdirectory to all filters, otherwise they would
+ # not match.
+ if subdir == '.':
+ subdir = ''
+ elif subdir:
+ subdir = os.path.normpath(subdir)
+ file_filter = ' '.join([subdir + '/' + x for x in file_filter.split()])
+ exclude_patterns = [subdir + '/' + x for x in exclude_patterns]
+ # To handle both cases, we cd into the target
+ # directory and optionally tell tar to strip the path
+ # prefix when the files were already moved.
+ subdir_components = len(subdir.split(os.path.sep)) if subdir else 0
+ strip=('--strip-components=%d' % subdir_components) if subdir else ''
+ # TODO: file_filter wild cards do not work (and haven't worked before either), because
+ # a) GNU tar requires a --wildcards parameter before turning on wild card matching.
+ # b) The semantics are not as intended (src/*.c also matches src/foo/bar.c,
+ # in contrast to the other use of file_filter as parameter of "git archive"
+ # where it only matches .c files directly in src).
+ files = runcmd("git archive %s %s | tar -x -v %s -C %s %s" %
+ (initialrev, subdir,
+ strip, extract_dir, file_filter),
+ ldir)
+ if exclude_patterns:
+ # Implement file removal by letting tar create the
+ # file and then deleting it in the file system
+ # again. Uses the list of files created by tar (easier
+ # than walking the tree).
+ for file in files.split('\n'):
+ if file.endswith(os.path.sep):
+ continue
+ for pattern in exclude_patterns:
+ if fnmatch.fnmatch(file, pattern):
+ os.unlink(os.path.join(*([extract_dir] + ['..'] * subdir_components + [file])))
+ break
+
+ if not conf.history:
+ copy_selected_files(initialrev, extract_dir, file_filter, exclude_patterns, ldir)
+ else:
+ # First fetch remote history into local repository.
+ # We need a ref for that, so ensure that there is one.
+ refname = "combo-layer-init-%s" % name
+ runcmd("git branch -f %s %s" % (refname, initialrev), ldir)
+ runcmd("git fetch %s %s" % (ldir, refname))
+ runcmd("git branch -D %s" % refname, ldir)
+ # Make that the head revision.
+ runcmd("git checkout -b %s %s" % (name, initialrev))
+ # Optional: cut the history by replacing the given
+ # start point(s) with commits providing the same
+ # content (aka tree), but with commit information that
+ # makes it clear that this is an artificially created
+ # commit that the original authors had nothing
+ # to do with.
+ since_rev = repo.get('since_revision', '')
+ if since_rev:
+ committer = runcmd('git var GIT_AUTHOR_IDENT').strip()
+ # Same time stamp, no name.
+ author = re.sub('.* (\d+ [+-]\d+)', r'unknown <unknown> \1', committer)
+ logger.info('author %s' % author)
+ for rev in since_rev.split():
+ # Resolve in component repo...
+ rev = runcmd('git log --oneline --no-abbrev-commit -n1 %s' % rev, ldir).split()[0]
+ # ... and then get the tree in current
+ # one. The commit should be in both repos with
+ # the same tree, but better check here.
+ tree = runcmd('git show -s --pretty=format:%%T %s' % rev).strip()
+ with tempfile.NamedTemporaryFile(mode='wt') as editor:
+ editor.write('''cat >$1 <<EOF
+tree %s
+author %s
+committer %s
+
+%s: squashed import of component
+
+This commit copies the entire set of files as found in
+%s %s
+
+For more information about previous commits, see the
+upstream repository.
+
+Commit created by combo-layer.
+EOF
+''' % (tree, author, committer, name, name, since_rev))
+ editor.flush()
+ os.environ['GIT_EDITOR'] = 'sh %s' % editor.name
+ runcmd('git replace --edit %s' % rev)
+
+ # Optional: rewrite history to change commit messages or to move files.
+ if 'hook' in repo or dest_dir != ".":
+ filter_branch = ['git', 'filter-branch', '--force']
+ with tempfile.NamedTemporaryFile(mode='wt') as hookwrapper:
+ if 'hook' in repo:
+ # Create a shell script wrapper around the original hook that
+ # can be used by git filter-branch. Hook may or may not have
+ # an absolute path.
+ hook = repo['hook']
+ hook = os.path.join(os.path.dirname(conf.conffile), '..', hook)
+ # The wrapper turns the commit message
+ # from stdin into a fake patch header.
+ # This is good enough for changing Subject
+ # and commit msg body with normal
+ # combo-layer hooks.
+ hookwrapper.write('''set -e
+tmpname=$(mktemp)
+trap "rm $tmpname" EXIT
+echo -n 'Subject: [PATCH] ' >>$tmpname
+cat >>$tmpname
+if ! [ $(tail -c 1 $tmpname | od -A n -t x1) == '0a' ]; then
+ echo >>$tmpname
+fi
+echo '---' >>$tmpname
+%s $tmpname $GIT_COMMIT %s
+tail -c +18 $tmpname | head -c -4
+''' % (hook, name))
+ hookwrapper.flush()
+ filter_branch.extend(['--msg-filter', 'bash %s' % hookwrapper.name])
+ if dest_dir != ".":
+ parent = os.path.dirname(dest_dir)
+ if not parent:
+ parent = '.'
+ # May run outside of the current directory, so do not assume that .git exists.
+ filter_branch.extend(['--tree-filter', 'mkdir -p .git/tmptree && find . -mindepth 1 -maxdepth 1 ! -name .git -print0 | xargs -0 -I SOURCE mv SOURCE .git/tmptree && mkdir -p %s && mv .git/tmptree %s' % (parent, dest_dir)])
+ filter_branch.append('HEAD')
+ runcmd(filter_branch)
+ runcmd('git update-ref -d refs/original/refs/heads/%s' % name)
+ repo['rewritten_revision'] = runcmd('git rev-parse HEAD').strip()
+ repo['stripped_revision'] = repo['rewritten_revision']
+ # Optional filter files: remove everything and re-populate using the normal filtering code.
+ # Override any potential .gitignore.
+ if file_filter or exclude_patterns:
+ runcmd('git rm -rf .')
+ if not os.path.exists(extract_dir):
+ os.makedirs(extract_dir)
+ copy_selected_files('HEAD', extract_dir, file_filter, exclude_patterns, '.',
+ subdir=dest_dir)
+ runcmd('git add --all --force .')
+ if runcmd('git status --porcelain'):
+ # Something to commit.
+ runcmd(['git', 'commit', '-m',
+ '''%s: select file subset
+
+Files from the component repository were chosen based on
+the following filters:
+file_filter = %s
+file_exclude = %s''' % (name, file_filter or '<empty>', repo.get('file_exclude', '<empty>'))])
+ repo['stripped_revision'] = runcmd('git rev-parse HEAD').strip()
+
+ if not lastrev:
+ lastrev = runcmd('git rev-parse %s' % initialrev, ldir).strip()
+ conf.update(name, "last_revision", lastrev, initmode=True)
+
+ if not conf.history:
+ runcmd("git add .")
+ else:
+        # Create an octopus merge commit, following http://stackoverflow.com/questions/10874149/git-octopus-merge-with-unrelated-repositoies
+ runcmd('git checkout master')
+ merge = ['git', 'merge', '--no-commit']
+ for name in conf.repos:
+ repo = conf.repos[name]
+ # Use branch created earlier.
+ merge.append(name)
+            # Root all commits which have no parent onto the common
+            # ancestor in the new repository.
+ for start in runcmd('git log --pretty=format:%%H --max-parents=0 %s --' % name).split('\n'):
+ runcmd('git replace --graft %s %s' % (start, startrev))
+ try:
+ runcmd(merge)
+ except Exception as error:
+ logger.info('''Merging component repository history failed, perhaps because of merge conflicts.
+It may be possible to commit anyway after resolving these conflicts.
+
+%s''' % error)
+ # Create MERGE_HEAD and MERGE_MSG. "git merge" itself
+ # does not create MERGE_HEAD in case of a (harmless) failure,
+ # and we want certain auto-generated information in the
+ # commit message for future reference and/or automation.
+ with open('.git/MERGE_HEAD', 'w') as head:
+ with open('.git/MERGE_MSG', 'w') as msg:
+ msg.write('repo: initial import of components\n\n')
+ # head.write('%s\n' % startrev)
+ for name in conf.repos:
+ repo = conf.repos[name]
+ # <upstream ref> <rewritten ref> <rewritten + files removed>
+ msg.write('combo-layer-%s: %s %s %s\n' % (name,
+ repo['last_revision'],
+ repo['rewritten_revision'],
+ repo['stripped_revision']))
+ rev = runcmd('git rev-parse %s' % name).strip()
+ head.write('%s\n' % rev)
+
+ if conf.localconffile:
+ localadded = True
+ try:
+ runcmd("git rm --cached %s" % conf.localconffile, printerr=False)
+ except subprocess.CalledProcessError:
+ localadded = False
+ if localadded:
+ localrelpath = os.path.relpath(conf.localconffile)
+ runcmd("grep -q %s .gitignore || echo %s >> .gitignore" % (localrelpath, localrelpath))
+ runcmd("git add .gitignore")
+ logger.info("Added local configuration file %s to .gitignore", localrelpath)
+ logger.info("Initial combo layer repository data has been created; please make any changes if desired and then use 'git commit' to make the initial commit.")
+ else:
+ logger.info("Repository already initialised, nothing to do.")
+
+
+def check_repo_clean(repodir):
+ """
+    Check whether the repo is clean; exit if it is dirty.
+ """
+ output=runcmd("git status --porcelain", repodir)
+    r = re.compile(r'\?\? patch-.*/')
+ dirtyout = [item for item in output.splitlines() if not r.match(item)]
+ if dirtyout:
+ logger.error("git repo %s is dirty, please fix it first", repodir)
+ sys.exit(1)
+
+def check_patch(patchfile):
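+    # Patches whose commit message itself contains diff-like lines
+    # ("--- ", hunk content) confuse "git am". Scan the message part
+    # and, if necessary, rewrite the patch with such lines indented by
+    # one space so that they no longer look like a diff.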
+ f = open(patchfile, 'rb')
+ ln = f.readline()
+ of = None
+ in_patch = False
+ beyond_msg = False
+ pre_buf = b''
+ while ln:
+ if not beyond_msg:
+ if ln == b'---\n':
+ if not of:
+ break
+ in_patch = False
+ beyond_msg = True
+ elif ln.startswith(b'--- '):
+ # We have a diff in the commit message
+ in_patch = True
+ if not of:
+ print('WARNING: %s contains a diff in its commit message, indenting to avoid failure during apply' % patchfile)
+ of = open(patchfile + '.tmp', 'wb')
+ of.write(pre_buf)
+ pre_buf = b''
+ elif in_patch and not ln[0] in b'+-@ \n\r':
+ in_patch = False
+ if of:
+ if in_patch:
+ of.write(b' ' + ln)
+ else:
+ of.write(ln)
+ else:
+ pre_buf += ln
+ ln = f.readline()
+ f.close()
+ if of:
+ of.close()
+ os.rename(patchfile + '.tmp', patchfile)
+
+def drop_to_shell(workdir=None):
+ if not sys.stdin.isatty():
+ print("Not a TTY so can't drop to shell for resolution, exiting.")
+ return False
+
+ shell = os.environ.get('SHELL', 'bash')
+ print('Dropping to shell "%s"\n' \
+ 'When you are finished, run the following to continue:\n' \
+ ' exit -- continue to apply the patches\n' \
+          '       exit 1  -- abort\n' % shell)
+ ret = subprocess.call([shell], cwd=workdir)
+ if ret != 0:
+ print("Aborting")
+ return False
+ else:
+ return True
+
+def check_rev_branch(component, repodir, rev, branch):
+ try:
+ actualbranch = runcmd("git branch --contains %s" % rev, repodir, printerr=False)
+ except subprocess.CalledProcessError as e:
+ if e.returncode == 129:
+ actualbranch = ""
+ else:
+ raise
+
+ if not actualbranch:
+ logger.error("%s: specified revision %s is invalid!" % (component, rev))
+ return False
+
+ branches = []
+ branchlist = actualbranch.split("\n")
+ for b in branchlist:
+ branches.append(b.strip().split(' ')[-1])
+
+ if branch not in branches:
+ logger.error("%s: specified revision %s is not on specified branch %s!" % (component, rev, branch))
+ return False
+ return True
+
+def get_repos(conf, repo_names):
+ repos = []
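+    # Arguments before the first option-like ("-...") entry name the
+    # components to operate on; if none are given, fall back to all
+    # repos that have updates enabled.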
+ for name in repo_names:
+ if name.startswith('-'):
+ break
+ else:
+ repos.append(name)
+ for repo in repos:
+ if not repo in conf.repos:
+ logger.error("Specified component '%s' not found in configuration" % repo)
+ sys.exit(1)
+
+ if not repos:
+ repos = [ repo for repo in conf.repos if conf.repos[repo].get("update", True) ]
+
+ return repos
+
+def action_pull(conf, args):
+ """
+ update the component repos only
+ """
+ repos = get_repos(conf, args[1:])
+
+ # make sure all repos are clean
+ for name in repos:
+ check_repo_clean(conf.repos[name]['local_repo_dir'])
+
+ for name in repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ branch = repo.get('branch', "master")
+ logger.info("update branch %s of component repo %s in %s ..." % (branch, name, ldir))
+ if not conf.hard_reset:
+ # Try to pull only the configured branch. Beware that this may fail
+ # when the branch is currently unknown (for example, after reconfiguring
+            # combo-layer). In that case we need to fetch everything and try the checkout
+ # and pull again.
+ try:
+ runcmd("git checkout %s" % branch, ldir, printerr=False)
+ except subprocess.CalledProcessError:
+ output=runcmd("git fetch", ldir)
+ logger.info(output)
+ runcmd("git checkout %s" % branch, ldir)
+ runcmd("git pull --ff-only", ldir)
+ else:
+ output=runcmd("git pull --ff-only", ldir)
+ logger.info(output)
+ else:
+ output=runcmd("git fetch", ldir)
+ logger.info(output)
+ runcmd("git checkout %s" % branch, ldir)
+ runcmd("git reset --hard FETCH_HEAD", ldir)
+
+def action_update(conf, args):
+ """
+    Update the component repos, then either:
+    - generate the patch list and apply the generated patches, or
+    - re-create the entire component history and merge it
+      into the current branch with a merge commit
+ """
+ components = [arg.split(':')[0] for arg in args[1:]]
+ revisions = {}
+ for arg in args[1:]:
+ if ':' in arg:
+ a = arg.split(':', 1)
+ revisions[a[0]] = a[1]
+ repos = get_repos(conf, components)
+
+ # make sure combo repo is clean
+ check_repo_clean(os.getcwd())
+
+    # Check whether we keep the component histories. Must be
+    # set either via the --history command line parameter or consistently
+    # in combo-layer.conf. Mixing modes is not supported (currently, and
+    # probably permanently, because it would be complicated).
+ if conf.history:
+ history = True
+ else:
+ history = None
+ for name in repos:
+ repo = conf.repos[name]
+ repo_history = repo.get('history', False)
+ if history is None:
+ history = repo_history
+ elif history != repo_history:
+ logger.error("'history' property is set inconsistently")
+ sys.exit(1)
+
+ # Step 1: update the component repos
+ if conf.nopull:
+ logger.info("Skipping pull (-n)")
+ else:
+ action_pull(conf, ['arg0'] + components)
+
+ if history:
+ update_with_history(conf, components, revisions, repos)
+ else:
+ update_with_patches(conf, components, revisions, repos)
+
+def update_with_patches(conf, components, revisions, repos):
+ import uuid
+ patch_dir = "patch-%s" % uuid.uuid4()
+ if not os.path.exists(patch_dir):
+ os.mkdir(patch_dir)
+
+ for name in repos:
+ revision = revisions.get(name, None)
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ dest_dir = repo['dest_dir']
+ branch = repo.get('branch', "master")
+ repo_patch_dir = os.path.join(os.getcwd(), patch_dir, name)
+
+ # Step 2: generate the patch list and store to patch dir
+ logger.info("Generating patches from %s..." % name)
+ top_revision = revision or branch
+ if not check_rev_branch(name, ldir, top_revision, branch):
+ sys.exit(1)
+ if dest_dir != ".":
+ prefix = "--src-prefix=a/%s/ --dst-prefix=b/%s/" % (dest_dir, dest_dir)
+ else:
+ prefix = ""
+ if repo['last_revision'] == "":
+ logger.info("Warning: last_revision of component %s is not set, starting from the first commit" % name)
+ patch_cmd_range = "--root %s" % top_revision
+ rev_cmd_range = top_revision
+ else:
+ if not check_rev_branch(name, ldir, repo['last_revision'], branch):
+ sys.exit(1)
+ patch_cmd_range = "%s..%s" % (repo['last_revision'], top_revision)
+ rev_cmd_range = patch_cmd_range
+
+ file_filter = repo.get('file_filter',".")
+
+ # Filter out unwanted files
+ exclude = repo.get('file_exclude', '')
+ if exclude:
+ for path in exclude.split():
+ p = "%s/%s" % (dest_dir, path) if dest_dir != '.' else path
+ file_filter += " ':!%s'" % p
+
+ patch_cmd = "git format-patch -N %s --output-directory %s %s -- %s" % \
+ (prefix,repo_patch_dir, patch_cmd_range, file_filter)
+ output = runcmd(patch_cmd, ldir)
+ logger.debug("generated patch set:\n%s" % output)
+ patchlist = output.splitlines()
+
+ rev_cmd = "git rev-list --no-merges %s -- %s" % (rev_cmd_range, file_filter)
+ revlist = runcmd(rev_cmd, ldir).splitlines()
+
+ # Step 3: Call repo specific hook to adjust patch
+ if 'hook' in repo:
+ # hook parameter is: ./hook patchpath revision reponame
+ count=len(revlist)-1
+ for patch in patchlist:
+ runcmd("%s %s %s %s" % (repo['hook'], patch, revlist[count], name))
+ count=count-1
+
+ # Step 4: write patch list and revision list to file, for user to edit later
+ patchlist_file = os.path.join(os.getcwd(), patch_dir, "patchlist-%s" % name)
+ repo['patchlist'] = patchlist_file
+ f = open(patchlist_file, 'w')
+ count=len(revlist)-1
+ for patch in patchlist:
+ f.write("%s %s\n" % (patch, revlist[count]))
+ check_patch(os.path.join(patch_dir, patch))
+ count=count-1
+ f.close()
+
+ # Step 5: invoke bash for user to edit patch and patch list
+ if conf.interactive:
+ print('You may now edit the patch and patch list in %s\n' \
+              'For example, you can remove unwanted patch entries from patchlist-*, so that they will not be applied later' % patch_dir)
+ if not drop_to_shell(patch_dir):
+ sys.exit(1)
+
+ # Step 6: apply the generated and revised patch
+ apply_patchlist(conf, repos)
+ runcmd("rm -rf %s" % patch_dir)
+
+ # Step 7: commit the updated config file if it's being tracked
+ commit_conf_file(conf, components)
+
+def conf_commit_msg(conf, components):
+ # create the "components" string
+ component_str = "all components"
+ if len(components) > 0:
+ # otherwise tell which components were actually changed
+ component_str = ", ".join(components)
+
+ # expand the template with known values
+ template = Template(conf.commit_msg_template)
+ msg = template.substitute(components = component_str)
+ return msg
+
+def commit_conf_file(conf, components, commit=True):
+ relpath = os.path.relpath(conf.conffile)
+ try:
+ output = runcmd("git status --porcelain %s" % relpath, printerr=False)
+ except:
+ # Outside the repository
+ output = None
+ if output:
+ if output.lstrip().startswith("M"):
+ logger.info("Committing updated configuration file")
+ if commit:
+ msg = conf_commit_msg(conf, components)
+ runcmd('git commit -m'.split() + [msg, relpath])
+ else:
+ runcmd('git add %s' % relpath)
+ return True
+ return False
+
+def apply_patchlist(conf, repos):
+ """
+ apply the generated patch list to combo repo
+ """
+ for name in repos:
+ repo = conf.repos[name]
+ lastrev = repo["last_revision"]
+ prevrev = lastrev
+
+ # Get non-blank lines from patch list file
+ patchlist = []
+ if os.path.exists(repo['patchlist']) or not conf.interactive:
+ # Note: we want this to fail here if the file doesn't exist and we're not in
+ # interactive mode since the file should exist in this case
+ with open(repo['patchlist']) as f:
+ for line in f:
+ line = line.rstrip()
+ if line:
+ patchlist.append(line)
+
+ ldir = conf.repos[name]['local_repo_dir']
+ branch = conf.repos[name].get('branch', "master")
+ branchrev = runcmd("git rev-parse %s" % branch, ldir).strip()
+
+ if patchlist:
+ logger.info("Applying patches from %s..." % name)
+ linecount = len(patchlist)
+ i = 1
+ for line in patchlist:
+ patchfile = line.split()[0]
+ lastrev = line.split()[1]
+ patchdisp = os.path.relpath(patchfile)
+ if os.path.getsize(patchfile) == 0:
+ logger.info("(skipping %d/%d %s - no changes)" % (i, linecount, patchdisp))
+ else:
+ cmd = "git am --keep-cr %s-p1 %s" % ('-s ' if repo.get('signoff', True) else '', patchfile)
+ logger.info("Applying %d/%d: %s" % (i, linecount, patchdisp))
+ try:
+ runcmd(cmd)
+ except subprocess.CalledProcessError:
+ logger.info('Running "git am --abort" to cleanup repo')
+ runcmd("git am --abort")
+ logger.error('"%s" failed' % cmd)
+ logger.info("Please manually apply patch %s" % patchdisp)
+ logger.info("Note: if you exit and continue applying without manually applying the patch, it will be skipped")
+ if not drop_to_shell():
+ if prevrev != repo['last_revision']:
+ conf.update(name, "last_revision", prevrev)
+ sys.exit(1)
+ prevrev = lastrev
+ i += 1
+ # Once all patches are applied, we should update
+ # last_revision to the branch head instead of the last
+ # applied patch. The two are not necessarily the same when
+ # the last commit is a merge commit or when the patches at
+ # the branch head were intentionally excluded.
+ #
+ # If we do not do that for a merge commit, the next
+ # combo-layer run will only exclude patches reachable from
+ # one of the merged branches and try to re-apply patches
+ # from other branches even though they were already
+ # copied.
+ #
+ # If patches were intentionally excluded, the next run will
+ # present them again instead of skipping over them. This
+ # may or may not be intended, so the code here is conservative
+ # and only addresses the "head is merge commit" case.
+ if lastrev != branchrev and \
+ len(runcmd("git show --pretty=format:%%P --no-patch %s" % branch, ldir).split()) > 1:
+ lastrev = branchrev
+ else:
+ logger.info("No patches to apply from %s" % name)
+ lastrev = branchrev
+
+ if lastrev != repo['last_revision']:
+ conf.update(name, "last_revision", lastrev)
+
+def action_splitpatch(conf, args):
+ """
+ generate the commit patch and
+ split the patch per repo
+ """
+ logger.debug("action_splitpatch")
+ if len(args) > 1:
+ commit = args[1]
+ else:
+ commit = "HEAD"
+ patchdir = "splitpatch-%s" % commit
+ if not os.path.exists(patchdir):
+ os.mkdir(patchdir)
+
+    # filerange_root is for the repo whose dest_dir is the root ".";
+    # it is built by excluding all other repos' dest_dirs,
+    # e.g. "-x repo1 -x repo2 -x repo3 ..."
+ filerange_root = ""
+ for name in conf.repos:
+ dest_dir = conf.repos[name]['dest_dir']
+ if dest_dir != ".":
+ filerange_root = '%s -x "%s/*"' % (filerange_root, dest_dir)
+
+ for name in conf.repos:
+ dest_dir = conf.repos[name]['dest_dir']
+ patch_filename = "%s/%s.patch" % (patchdir, name)
+ if dest_dir == ".":
+ cmd = "git format-patch -n1 --stdout %s^..%s | filterdiff -p1 %s > %s" % (commit, commit, filerange_root, patch_filename)
+ else:
+ cmd = "git format-patch --no-prefix -n1 --stdout %s^..%s -- %s > %s" % (commit, commit, dest_dir, patch_filename)
+ runcmd(cmd)
+ # Detect empty patches (including those produced by filterdiff above
+ # that contain only preamble text)
+ if os.path.getsize(patch_filename) == 0 or runcmd("filterdiff %s" % patch_filename) == "":
+ os.remove(patch_filename)
+ logger.info("(skipping %s - no changes)", name)
+ else:
+ logger.info(patch_filename)
+
+def update_with_history(conf, components, revisions, repos):
+ '''Update all components with full history.
+
+ Works by importing all commits reachable from a component's
+ current head revision. If those commits are rooted in an already
+ imported commit, their content gets mixed with the content of the
+ combined repo of that commit (new or modified files overwritten,
+ removed files removed).
+
+ The last commit is an artificial merge commit that merges all the
+ updated components into the combined repository.
+
+ The HEAD ref only gets updated at the very end. All intermediate work
+ happens in a worktree which will get garbage collected by git eventually
+ after a failure.
+ '''
+ # Remember current HEAD and what we need to add to it.
+ head = runcmd("git rev-parse HEAD").strip()
+ additional_heads = {}
+
+ # Track the mapping between original commit and commit in the
+ # combined repo. We do not have to distinguish between components,
+ # because commit hashes are different anyway. Often we can
+ # skip find_revs() entirely (for example, when all new commits
+ # are derived from the last imported revision).
+ #
+ # Using "head" (typically the merge commit) instead of the actual
+ # commit for the component leads to a nicer history in the combined
+ # repo.
+ old2new_revs = {}
+ for name in repos:
+ repo = conf.repos[name]
+ revision = repo['last_revision']
+ if revision:
+ old2new_revs[revision] = head
+
+ def add_p(parents):
+ '''Insert -p before each entry.'''
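+        # e.g. add_p(['a', 'b']) -> ['-p', 'a', '-p', 'b']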
+ parameters = []
+ for p in parents:
+ parameters.append('-p')
+ parameters.append(p)
+ return parameters
+
+ # Do all intermediate work with a separate work dir and index,
+ # chosen via env variables (can't use "git worktree", it is too
+ # new). This is useful (no changes to current work tree unless the
+ # update succeeds) and required (otherwise we end up temporarily
+ # removing the combo-layer hooks that we currently use when
+ # importing a new component).
+ #
+ # Not cleaned up after a failure at the moment.
+ wdir = os.path.join(os.getcwd(), ".git", "combo-layer")
+ windex = wdir + ".index"
+ if os.path.isdir(wdir):
+ shutil.rmtree(wdir)
+ os.mkdir(wdir)
+ wenv = copy.deepcopy(os.environ)
+ wenv["GIT_WORK_TREE"] = wdir
+ wenv["GIT_INDEX_FILE"] = windex
+ # This one turned out to be needed in practice.
+ wenv["GIT_OBJECT_DIRECTORY"] = os.path.join(os.getcwd(), ".git", "objects")
+ wargs = {"destdir": wdir, "env": wenv}
+
+ for name in repos:
+ revision = revisions.get(name, None)
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ dest_dir = repo['dest_dir']
+ branch = repo.get('branch', "master")
+ hook = repo.get('hook', None)
+ largs = {"destdir": ldir, "env": None}
+ file_include = repo.get('file_filter', '').split()
+ file_include.sort() # make sure that short entries like '.' come first.
+ file_exclude = repo.get('file_exclude', '').split()
+
+ def include_file(file):
+ if not file_include:
+ # No explicit filter set, include file.
+ return True
+ for filter in file_include:
+ if filter == '.':
+ # Another special case: include current directory and thus all files.
+ return True
+ if os.path.commonprefix((filter, file)) == filter:
+ # Included in directory or direct file match.
+ return True
+            # Check for a wildcard match *with* allowing * to match /, i.e.
+            # src/*.c does match src/foobar/*.c. That's not how it is done elsewhere
+            # when passing the filtering to "git archive", but it is unclear what
+            # the intended semantics are (the advice on file_exclude to "append a * wildcard
+            # at the end" to match the full content of a directory implies that
+            # slashes are indeed not special), so here we simply do what's easy to
+            # implement in Python.
+ logger.debug('fnmatch(%s, %s)' % (file, filter))
+ if fnmatch.fnmatchcase(file, filter):
+ return True
+ return False
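+            # For example (hypothetical paths), with file_filter = src/*.c
+            # include_file('src/foobar/main.c') returns True, because '*'
+            # is allowed to match '/' here.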
+
+ def exclude_file(file):
+ for filter in file_exclude:
+ if fnmatch.fnmatchcase(file, filter):
+ return True
+ return False
+
+ def file_filter(files):
+ '''Clean up file list so that only included files remain.'''
+ index = 0
+ while index < len(files):
+ file = files[index]
+ if not include_file(file) or exclude_file(file):
+ del files[index]
+ else:
+ index += 1
+
+
+ # Generate the revision list.
+ logger.info("Analyzing commits from %s..." % name)
+ top_revision = revision or branch
+ if not check_rev_branch(name, ldir, top_revision, branch):
+ sys.exit(1)
+
+ last_revision = repo['last_revision']
+ rev_list_args = "--full-history --sparse --topo-order --reverse"
+ if not last_revision:
+ logger.info("Warning: last_revision of component %s is not set, starting from the first commit" % name)
+ rev_list_args = rev_list_args + ' ' + top_revision
+ else:
+ if not check_rev_branch(name, ldir, last_revision, branch):
+ sys.exit(1)
+ rev_list_args = "%s %s..%s" % (rev_list_args, last_revision, top_revision)
+
+ # By definition, the current HEAD contains the latest imported
+ # commit of each component. We use that as initial mapping even
+ # though the commits do not match exactly because
+ # a) it always works (in contrast to find_revs, which relies on special
+ # commit messages)
+ # b) it is faster than find_revs, which will only be called on demand
+ # and can be skipped entirely in most cases
+ # c) last but not least, the combined history looks nicer when all
+ # new commits are rooted in the same merge commit
+ old2new_revs[last_revision] = head
+
+ # We care about all commits (--full-history and --sparse) and
+        # we want to reconstruct the topology and thus do not care
+ # about ordering by time (--topo-order). We ask for the ones
+ # we need to import first to be listed first (--reverse).
+ revs = runcmd("git rev-list %s" % rev_list_args, **largs).split()
+ logger.debug("To be imported: %s" % revs)
+ # Now 'revs' contains all revisions reachable from the top revision.
+ # All revisions derived from the 'last_revision' definitely are new,
+ # whereas the others may or may not have been imported before. For
+ # a linear history in the component, that second set will be empty.
+ # To distinguish between them, we also get the shorter list
+ # of revisions starting at the ancestor.
+ if last_revision:
+ ancestor_revs = runcmd("git rev-list --ancestry-path %s" % rev_list_args, **largs).split()
+ else:
+ ancestor_revs = []
+ logger.debug("Ancestors: %s" % ancestor_revs)
+
+ # Now import each revision.
+ logger.info("Importing commits from %s..." % name)
+ def import_rev(rev):
+ global scanned_revs
+
+ # If it is part of the new commits, we definitely need
+            # to import it. Otherwise we need to check whether we might have
+ # imported it before. If it was imported and we merely
+ # fail to find it because commit messages did not track
+ # the mapping, then we end up importing it again. So
+ # combined repos using "updating with history" really should
+ # enable the "From ... rev:" commit header modifications.
+ if rev not in ancestor_revs and rev not in old2new_revs and not scanned_revs:
+ logger.debug("Revision %s triggers log analysis." % rev)
+ find_revs(old2new_revs, head)
+ scanned_revs = True
+ new_rev = old2new_revs.get(rev, None)
+ if new_rev:
+ return new_rev
+
+ # If the commit is not in the original list of revisions
+ # to be imported, then it must be a parent of one of those
+ # commits and it was skipped during earlier imports or not
+ # found. Importing such merge commits leads to very ugly
+            # history (a long cascade of merge commits which all point
+            # to older commits) when switching from "update via
+ # patches" to "update with history".
+ #
+ # We can avoid importing merge commits if all non-merge commits
+ # reachable from it were already imported. In that case we
+ # can root the new commits in the current head revision.
+ def is_imported(prev):
+ parents = runcmd("git show --no-patch --pretty=format:%P " + prev, **largs).split()
+ if len(parents) > 1:
+ for p in parents:
+ if not is_imported(p):
+ logger.debug("Must import %s because %s is not imported." % (rev, p))
+ return False
+ return True
+ elif prev in old2new_revs:
+ return True
+ else:
+ logger.debug("Must import %s because %s is not imported." % (rev, prev))
+ return False
+ if rev not in revs and is_imported(rev):
+ old2new_revs[rev] = head
+ return head
+
+ # Need to import rev. Collect some information about it.
+ logger.debug("Importing %s" % rev)
+ (parents, author_name, author_email, author_timestamp, body) = \
+ runcmd("git show --no-patch --pretty=format:%P%x00%an%x00%ae%x00%at%x00%B " + rev, **largs).split(chr(0))
+ parents = parents.split()
+ if parents:
+ # Arbitrarily pick the first parent as base. It may or may not have
+ # been imported before. For example, if the parent is a merge commit
+ # and previously the combined repository used patching as update
+ # method, then the actual merge commit parent never was imported.
+                # To cover this, we recursively import parents.
+ parent = parents[0]
+ new_parent = import_rev(parent)
+ # Clean index and working tree. TODO: can we combine this and the
+ # next into one command with less file IO?
+ # "git reset --hard" does not work, it changes HEAD of the parent
+ # repo, which we wanted to avoid. Probably need to keep
+ # track of the rev that corresponds to the index and use apply_commit().
+ runcmd("git rm -q --ignore-unmatch -rf .", **wargs)
+ # Update index and working tree to match the parent.
+ runcmd("git checkout -q -f %s ." % new_parent, **wargs)
+ else:
+ parent = None
+ # Clean index and working tree.
+ runcmd("git rm -q --ignore-unmatch -rf .", **wargs)
+
+ # Modify index and working tree such that it mirrors the commit.
+ apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=file_filter)
+
+ # Now commit.
+ new_tree = runcmd("git write-tree", **wargs).strip()
+ env = copy.deepcopy(wenv)
+ env['GIT_AUTHOR_NAME'] = author_name
+ env['GIT_AUTHOR_EMAIL'] = author_email
+ env['GIT_AUTHOR_DATE'] = author_timestamp
+ if hook:
+ # Need to turn the verbatim commit message into something resembling a patch header
+ # for the hook.
+ with tempfile.NamedTemporaryFile(mode='wt', delete=False) as patch:
+ patch.write('Subject: [PATCH] ')
+ patch.write(body)
+ patch.write('\n---\n')
+ patch.close()
+ runcmd([hook, patch.name, rev, name])
+ with open(patch.name) as f:
+ body = f.read()[len('Subject: [PATCH] '):][:-len('\n---\n')]
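+                    # The slicing strips the "Subject: [PATCH] " prefix and
+                    # the trailing "\n---\n" again, leaving the (possibly
+                    # hook-modified) plain commit message.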
+
+ # We can skip non-merge commits that did not change any files. Those are typically
+ # the result of file filtering, although they could also have been introduced
+ # intentionally upstream, in which case we drop some information here.
+ if len(parents) == 1:
+ parent_rev = import_rev(parents[0])
+ old_tree = runcmd("git show -s --pretty=format:%T " + parent_rev, **wargs).strip()
+ commit = old_tree != new_tree
+ if not commit:
+ new_rev = parent_rev
+ else:
+ commit = True
+ if commit:
+ new_rev = runcmd("git commit-tree".split() + add_p([import_rev(p) for p in parents]) +
+ ["-m", body, new_tree],
+ env=env).strip()
+ old2new_revs[rev] = new_rev
+
+ return new_rev
+
+ if revs:
+ for rev in revs:
+ import_rev(rev)
+ # Remember how to update our current head. New components get added,
+ # updated components get the delta between current head and the updated component
+ # applied.
+ additional_heads[old2new_revs[revs[-1]]] = head if repo['last_revision'] else None
+ repo['last_revision'] = revs[-1]
+
+ # Now construct the final merge commit. We create the tree by
+ # starting with the head and applying the changes from each
+ # components imported head revision.
+ if additional_heads:
+ runcmd("git reset --hard", **wargs)
+ for rev, base in additional_heads.items():
+ apply_commit(base, rev, wargs, wargs, None)
+
+ # Commit with all component branches as parents as well as the previous head.
+ logger.info("Writing final merge commit...")
+ msg = conf_commit_msg(conf, components)
+ new_tree = runcmd("git write-tree", **wargs).strip()
+ new_rev = runcmd("git commit-tree".split() +
+ add_p([head] + list(additional_heads.keys())) +
+ ["-m", msg, new_tree],
+ **wargs).strip()
+ # And done! This is the first time we change the HEAD in the actual work tree.
+ runcmd("git reset --hard %s" % new_rev)
+
+ # Update and stage the (potentially modified)
+ # combo-layer.conf, but do not commit separately.
+ for name in repos:
+ repo = conf.repos[name]
+ rev = repo['last_revision']
+ conf.update(name, "last_revision", rev)
+ if commit_conf_file(conf, components, False):
+ # Must augment the previous commit.
+ runcmd("git commit --amend -C HEAD")
+
+
+scanned_revs = False
+def find_revs(old2new, head):
+ '''Construct mapping from original commit hash to commit hash in
+ combined repo by looking at the commit messages. Depends on the
+ "From ... rev: ..." convention.'''
+ logger.info("Analyzing log messages to find previously imported commits...")
+ num_known = len(old2new)
+ log = runcmd("git log --grep='From .* rev: [a-fA-F0-9][a-fA-F0-9]*' --pretty=format:%H%x00%B%x00 " + head).split(chr(0))
+ regex = re.compile(r'From .* rev: ([a-fA-F0-9]+)')
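+    # log is a flat list of hash, body, hash, body, ...;
+    # zip(*[iter(log)]*2) walks it pairwise.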
+ for new_rev, body in zip(*[iter(log)]* 2):
+ # Use the last one, in the unlikely case there are more than one.
+ rev = regex.findall(body)[-1]
+ if rev not in old2new:
+ old2new[rev] = new_rev.strip()
+ logger.info("Found %d additional commits, leading to: %s" % (len(old2new) - num_known, old2new))
+
+
+def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
+ '''Compare revision against parent, remove files deleted in the
+ commit, re-write new or modified ones. Moves them into dest_dir.
+ Optionally filters files.
+ '''
+ if not dest_dir:
+ dest_dir = "."
+    # -r recurses into sub-directories, giving us the full overview of
+    # what changed. We do not care about copy/edits or renames, so we
+ # can disable those with --no-renames (but we still parse them,
+ # because it was not clear from git documentation whether C and M
+ # lines can still occur).
+ logger.debug("Applying changes between %s and %s in %s" % (parent, rev, largs["destdir"]))
+ delete = []
+ update = []
+ if parent:
+ # Apply delta.
+ changes = runcmd("git diff-tree --no-commit-id --no-renames --name-status -r --raw -z %s %s" % (parent, rev), **largs).split(chr(0))
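+        # changes is a flat NUL-separated list of status, path, status,
+        # path, ... which zip(*[iter(changes)]*2) walks pairwise.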
+ for status, name in zip(*[iter(changes)]*2):
+ if status[0] in "ACMRT":
+ update.append(name)
+ elif status[0] in "D":
+ delete.append(name)
+ else:
+ logger.error("Unknown status %s of file %s in revision %s" % (status, name, rev))
+ sys.exit(1)
+ else:
+ # Copy all files.
+ update.extend(runcmd("git ls-tree -r --name-only -z %s" % rev, **largs).split(chr(0)))
+
+    # Include/exclude files as defined in the component config.
+ # Both updated and deleted file lists get filtered, because it might happen
+ # that a file gets excluded, pulled from a different component, and then the
+ # excluded file gets deleted. In that case we must keep the copy.
+ if file_filter:
+ file_filter(update)
+ file_filter(delete)
+
+ # We export into a tar archive here and extract with tar because it is simple (no
+ # need to implement file and symlink writing ourselves) and gives us some degree
+ # of parallel IO. The downside is that we have to pass the list of files via
+ # command line parameters - hopefully there will never be too many at once.
+ if update:
+ target = os.path.join(wargs["destdir"], dest_dir)
+ if not os.path.isdir(target):
+ os.makedirs(target)
+ quoted_target = pipes.quote(target)
+ # os.sysconf('SC_ARG_MAX') is lying: running a command with
+ # string length 629343 already failed with "Argument list too
+ # long" although SC_ARG_MAX = 2097152. "man execve" explains
+ # the limitations, but those are pretty complicated. So here
+ # we just hard-code a fixed value which is more likely to work.
+ max_cmdsize = 64 * 1024
+ while update:
+ quoted_args = []
+ unquoted_args = []
+ cmdsize = 100 + len(quoted_target)
+ while update:
+ quoted_next = pipes.quote(update[0])
+ size_next = len(quoted_next) + len(dest_dir) + 1
+ logger.debug('cmdline length %d + %d < %d?' % (cmdsize, size_next, os.sysconf('SC_ARG_MAX')))
+ if cmdsize + size_next < max_cmdsize:
+ quoted_args.append(quoted_next)
+ unquoted_args.append(update.pop(0))
+ cmdsize += size_next
+ else:
+ logger.debug('Breaking the cmdline at length %d' % cmdsize)
+ break
+ logger.debug('Final cmdline length %d / %d' % (cmdsize, os.sysconf('SC_ARG_MAX')))
+ cmd = "git archive %s %s | tar -C %s -xf -" % (rev, ' '.join(quoted_args), quoted_target)
+ logger.debug('First cmdline length %d' % len(cmd))
+ runcmd(cmd, **largs)
+ cmd = "git add -f".split() + [os.path.join(dest_dir, x) for x in unquoted_args]
+ logger.debug('Second cmdline length %d' % reduce(lambda x, y: x + len(y), cmd, 0))
+ runcmd(cmd, **wargs)
+    if delete:
+        # Paths are relative to the component repo; prefix them with
+        # dest_dir and remove them all with a single "git rm".
+        runcmd("git rm -f --ignore-unmatch".split() + [os.path.join(dest_dir, x) for x in delete], **wargs)
+
+def action_error(conf, args):
+    logger.error("invalid action %s" % args[0])
+
+actions = {
+ "init": action_init,
+ "update": action_update,
+ "pull": action_pull,
+ "splitpatch": action_splitpatch,
+}
+
+def main():
+ parser = optparse.OptionParser(
+ version = "Combo Layer Repo Tool version %s" % __version__,
+ usage = """%prog [options] action
+
+Create and update a combination layer repository from multiple component repositories.
+
+Action:
+ init initialise the combo layer repo
+ update [components] get patches from component repos and apply them to the combo repo
+ pull [components] just pull component repos only
+ splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
+
+ parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).",
+ action = "store", dest = "conffile", default = "conf/combo-layer.conf")
+
+ parser.add_option("-i", "--interactive", help = "interactive mode, user can edit the patch list and patches",
+ action = "store_true", dest = "interactive", default = False)
+
+ parser.add_option("-D", "--debug", help = "output debug information",
+ action = "store_true", dest = "debug", default = False)
+
+ parser.add_option("-n", "--no-pull", help = "skip pulling component repos during update",
+ action = "store_true", dest = "nopull", default = False)
+
+ parser.add_option("--hard-reset",
+ help = "instead of pull do fetch and hard-reset in component repos",
+ action = "store_true", dest = "hard_reset", default = False)
+
+ parser.add_option("-H", "--history", help = "import full history of components during init",
+ action = "store_true", default = False)
+
+ options, args = parser.parse_args(sys.argv)
+
+ # Dispatch to action handler
+ if len(args) == 1:
+ logger.error("No action specified, exiting")
+ parser.print_help()
+ elif args[1] not in actions:
+ logger.error("Unsupported action %s, exiting\n" % (args[1]))
+ parser.print_help()
+ elif not os.path.exists(options.conffile):
+ logger.error("No valid config file, exiting\n")
+ parser.print_help()
+ else:
+ if options.debug:
+ logger.setLevel(logging.DEBUG)
+ confdata = Configuration(options)
+ initmode = (args[1] == 'init')
+ confdata.sanity_check(initmode)
+ actions.get(args[1], action_error)(confdata, args[1:])
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/combo-layer-hook-default.sh b/poky/scripts/combo-layer-hook-default.sh
new file mode 100755
index 000000000..1e3a3b9bc
--- /dev/null
+++ b/poky/scripts/combo-layer-hook-default.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+# Hook to add source component/revision info to commit message
+# Parameter:
+# $1 patch-file
+# $2 revision
+# $3 reponame
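+#
+# Example (hypothetical values reponame=bitbake, rev=abc123):
+# "Subject: [PATCH] fetch2: fix X" becomes
+# "Subject: [PATCH] bitbake: fetch2: fix X", and
+# "(From bitbake rev: abc123)" is inserted into the message body.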
+
+patchfile=$1
+rev=$2
+reponame=$3
+
+sed -i -e "0,/^Subject:/s#^Subject: \[PATCH\] \($reponame: \)*\(.*\)#Subject: \[PATCH\] $reponame: \2#" $patchfile
+if grep -q '^Signed-off-by:' $patchfile; then
+ # Insert before Signed-off-by.
+ sed -i -e "0,/^Signed-off-by:/s#\(^Signed-off-by:.*\)#\(From $reponame rev: $rev\)\n\n\1#" $patchfile
+else
+ # Insert before final --- separator, with extra blank lines removed.
+ perl -e "\$_ = join('', <>); s/^(.*\S[ \t]*)(\n|\n\s*\n)---\n/\$1\n\nFrom $reponame rev: $rev\n---\n/s; print;" $patchfile >$patchfile.tmp
+ mv $patchfile.tmp $patchfile
+fi
diff --git a/poky/scripts/combo-layer.conf.example b/poky/scripts/combo-layer.conf.example
new file mode 100644
index 000000000..90e2b5872
--- /dev/null
+++ b/poky/scripts/combo-layer.conf.example
@@ -0,0 +1,93 @@
+# combo-layer example configuration file
+
+# Default values for all sections.
+[DEFAULT]
+
+# Add 'Signed-off-by' to all commits that get imported automatically.
+signoff = True
+
+# component name
+[bitbake]
+
+# Override the signoff default above (not very useful, but possible).
+signoff = False
+
+# mandatory options
+# git upstream uri
+src_uri = git://git.openembedded.org/bitbake
+
+# the directory to clone the component repo
+local_repo_dir = /home/kyu3/src/test/bitbake
+
+# the relative dir within the combo repo to put the component files
+# use "." if the files should be in the root dir
+dest_dir = bitbake
+
+# the last update revision.
+# "init" will set this to the latest revision automatically, however if it
+# is empty when "update" is run, the tool will start from the first commit.
+# Note that this value will get updated by "update" if the component repo's
+# latest revision changed and the operation completes successfully.
+last_revision =
+
+# optional options:
+
+# branch: specify the branch in the component repo to pull from
+# (master if not specified)
+
+# file_filter: only include the specified file(s)
+# file_filter = [path] [path] ...
+# example:
+# file_filter = src/ : only include the subdir src
+# file_filter = src/*.c : only include the src *.c file
+# file_filter = src/main.c src/Makefile.am : only include these two files
+
+# file_exclude: filter out these file(s)
+# file_exclude = [path] [path] ...
+#
+# Each entry must match a file name. In contrast to file_filter, matching
+# a directory has no effect. To achieve that, append a * wildcard
+# at the end.
+#
+# Wildcards are applied to the complete path and also match slashes.
+#
+# example:
+# file_exclude = src/foobar/* : exclude everything under src/foobar
+# file_exclude = src/main.c : filter out main.c after including it with file_filter = src/*.c
+# file_exclude = *~ : exclude backup files
+
+# hook: if provided, the tool will call the hook to process the generated
+# patch from upstream, and then apply the modified patch to the combo
+# repo.
+# the hook script is called as follows: ./hook patchpath revision reponame
+# example:
+# hook = combo-layer-hook-default.sh
+
+# since_revision:
+# since_revision = release-1-2
+# since_revision = 12345 abcdf
+#
+# If provided, truncate imported history during "combo-layer --history
+# init" at the specified revision(s). More than one can be specified
+# to cut off multiple component branches.
+#
+# The specified commits themselves do not get imported. Instead, an
+# artificial commit with "unknown" author is created with a content
+# that matches the original commit.
+
+[oe-core]
+src_uri = git://git.openembedded.org/openembedded-core
+local_repo_dir = /home/kyu3/src/test/oecore
+dest_dir = .
+last_revision =
+since_revision = some-tag-or-commit-on-master-branch
+
+# It is also possible to embed python code in the config values. Similar
+# to bitbake, it considers every value starting with @ to be a python
+# script.
+# e.g. local_repo_dir could easily be configured using an environment
+# variable:
+#
+# [bitbake]
+# local_repo_dir = @os.getenv("LOCAL_REPO_DIR") + "/bitbake"
+#
diff --git a/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh b/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh
new file mode 100755
index 000000000..136a25570
--- /dev/null
+++ b/poky/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -0,0 +1,137 @@
+#!/bin/bash
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+# This script operates on the .dat file generated by bb-matrix.sh. It tolerates
+# the header by skipping the first line, but error messages and bad data records
+# need to be removed first. It will generate three views of the plot, and leave
+# an interactive view open for further analysis.
+#
+# AUTHORS
+# Darren Hart <dvhart@linux.intel.com>
+#
+
+# Setup the defaults
+DATFILE="bb-matrix.dat"
+XLABEL="BB_NUMBER_THREADS"
+YLABEL="PARALLEL_MAKE"
+FIELD=3
+DEF_TITLE="Elapsed Time (seconds)"
+PM3D_FRAGMENT="unset surface; set pm3d at s hidden3d 100"
+SIZE="640,480"
+
+function usage {
+CMD=$(basename $0)
+cat <<EOM
+Usage: $CMD [-d datfile] [-f field] [-h] [-s W,H] [-t title] [-w]
+ -d datfile The data file generated by bb-matrix.sh (default: $DATFILE)
+ -f field The field index to plot as the Z axis from the data file
+ (default: $FIELD, "$DEF_TITLE")
+ -h Display this help message
+ -s W,H PNG and window size in pixels (default: $SIZE)
+ -t title The title to display, should describe the field (-f) and units
+ (default: "$DEF_TITLE")
+ -w Render the plot as wireframe with a 2D colormap projected on the
+ XY plane rather than as the texture for the surface
+EOM
+}
+
+# Parse and validate arguments
+while getopts "d:f:hs:t:w" OPT; do
+ case $OPT in
+ d)
+ DATFILE="$OPTARG"
+ ;;
+ f)
+ FIELD="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ s)
+ SIZE="$OPTARG"
+ ;;
+ t)
+ TITLE="$OPTARG"
+ ;;
+ w)
+ PM3D_FRAGMENT="set pm3d at b"
+ W="-w"
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# Ensure the data file exists
+if [ ! -f "$DATFILE" ]; then
+ echo "ERROR: $DATFILE does not exist"
+ usage
+ exit 1
+fi
+PLOT_BASENAME=${DATFILE%.*}-f$FIELD$W
+
+# Set a sane title
+# TODO: parse the header and define titles for each format parameter for TIME(1)
+if [ -z "$TITLE" ]; then
+ if [ ! "$FIELD" == "3" ]; then
+ TITLE="Field $FIELD"
+ else
+ TITLE="$DEF_TITLE"
+ fi
+fi
+
+# Determine the dgrid3d mesh dimensions
+MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sed 's/^0*//' | sort -n | uniq | head -n1)
+MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sed 's/^0*//' | sort -n | uniq | tail -n1)
+BB_CNT=$((${MAX} - ${MIN} + 1))
+MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sed 's/^0*//' | sort -n | uniq | head -n1)
+MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sed 's/^0*//' | sort -n | uniq | tail -n1)
+PM_CNT=$((${MAX} - ${MIN} + 1))
+
+
+(cat <<EOF
+set title "$TITLE"
+set xlabel "$XLABEL"
+set ylabel "$YLABEL"
+set style line 100 lt 5 lw 1.5
+$PM3D_FRAGMENT
+set dgrid3d $PM_CNT,$BB_CNT splines
+set ticslevel 0.2
+
+set term png size $SIZE
+set output "$PLOT_BASENAME.png"
+splot "$DATFILE" every ::1 using 1:2:$FIELD with lines ls 100
+
+set view 90,0
+set output "$PLOT_BASENAME-bb.png"
+replot
+
+set view 90,90
+set output "$PLOT_BASENAME-pm.png"
+replot
+
+set view 60,30
+set term wxt size $SIZE
+replot
+EOF
+) | gnuplot --persist
diff --git a/poky/scripts/contrib/bb-perf/bb-matrix.sh b/poky/scripts/contrib/bb-perf/bb-matrix.sh
new file mode 100755
index 000000000..106456584
--- /dev/null
+++ b/poky/scripts/contrib/bb-perf/bb-matrix.sh
@@ -0,0 +1,79 @@
+#!/bin/bash
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+# This script runs BB_CMD (typically building core-image-sato) for all
+# combinations of BB_RANGE and PM_RANGE values. It saves off all the console
+# logs, the buildstats directories, and creates a bb-pm-runtime.dat file which
+# can be used to postprocess the results with a plotting tool, spreadsheet, etc.
+# Before running this script, it is recommended that you pre-download all the
+# necessary sources by performing the BB_CMD once manually. It is also a good
+# idea to disable cron to avoid runtime variations caused by things like the
+# locate process. Be sure to sanitize the dat file prior to post-processing as
+# it may contain error messages or bad runs that should be removed.
+#
+# AUTHORS
+# Darren Hart <dvhart@linux.intel.com>
+#
+
+# The following ranges are appropriate for a 4 core system with 8 logical units
+# Use leading 0s to ensure all digits are the same string length, this results
+# in nice log file names and columnar dat files.
+BB_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16"
+PM_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16"
+
+DATADIR="bb-matrix-$$"
+BB_CMD="bitbake core-image-minimal"
+RUNTIME_LOG="$DATADIR/bb-matrix.dat"
+
+# See TIME(1) for a description of the time format parameters
+# The following all report 0: W K r s t w
+TIME_STR="%e %S %U %P %c %w %R %F %M %x"
+
+# Prepare the DATADIR
+mkdir $DATADIR
+if [ $? -ne 0 ]; then
+ echo "Failed to create $DATADIR."
+ exit 1
+fi
+
+# Add a simple header
+echo "BB PM $TIME_STR" > $RUNTIME_LOG
+for BB in $BB_RANGE; do
+ for PM in $PM_RANGE; do
+ RUNDIR="$DATADIR/$BB-$PM-build"
+ mkdir $RUNDIR
+ BB_LOG=$RUNDIR/$BB-$PM-bitbake.log
+ date
+ echo "BB=$BB PM=$PM Logging to $BB_LOG"
+
+ echo -n " Preparing the work directory... "
+ rm -rf pseudodone tmp sstate-cache tmp-eglibc &> /dev/null
+ echo "done"
+
+ # Export the variables under test and run the bitbake command
+ # Strip any leading zeroes before passing to bitbake
+ export BB_NUMBER_THREADS=$(echo $BB | sed 's/^0*//')
+ export PARALLEL_MAKE="-j $(echo $PM | sed 's/^0*//')"
+ /usr/bin/time -f "$BB $PM $TIME_STR" -a -o $RUNTIME_LOG $BB_CMD &> $BB_LOG
+
+ echo " $(tail -n1 $RUNTIME_LOG)"
+ cp -a tmp/buildstats $RUNDIR/$BB-$PM-buildstats
+ done
+done
diff --git a/poky/scripts/contrib/bb-perf/buildstats-plot.sh b/poky/scripts/contrib/bb-perf/buildstats-plot.sh
new file mode 100755
index 000000000..7e8ae0410
--- /dev/null
+++ b/poky/scripts/contrib/bb-perf/buildstats-plot.sh
@@ -0,0 +1,157 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+#
+# Produces script data to be consumed by gnuplot. There are two possible plots
+# depending if either the -S parameter is present or not:
+#
+# * without -S: Produces a histogram listing the top N recipes/tasks versus
+#   stats. The first stat defined in the -s parameter is the one taken
+#   into account for ranking.
+# * -S: Produces a histogram listing tasks versus stats. In this case,
+#   the value of each stat is the sum for that particular stat in all recipes found.
+#   Stat values are sorted in descending order by the first stat defined in -s.
+#
+# EXAMPLES
+#
+# 1. Top recipes' tasks taking into account utime
+#
+# $ buildstats-plot.sh -s utime | gnuplot -p
+#
+# 2. Tasks versus utime:stime
+#
+# $ buildstats-plot.sh -s utime:stime -S | gnuplot -p
+#
+# 3. Tasks versus IO write_bytes:IO read_bytes
+#
+# $ buildstats-plot.sh -s 'IO write_bytes:IO read_bytes' -S | gnuplot -p
+#
+# AUTHORS
+# Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
+#
+
+set -o nounset
+set -o errexit
+
+BS_DIR="tmp/buildstats"
+N=10
+STATS="utime"
+SUM=""
+OUTDATA_FILE="$PWD/buildstats-plot.out"
+
+function usage {
+ CMD=$(basename $0)
+ cat <<EOM
+Usage: $CMD [-b buildstats_dir] [-n N] [-s stats] [-S] [-o outfile] [-h]
+ -b buildstats The path where the folder resides
+ (default: "$BS_DIR")
+ -n N Top N recipes to display. Ignored if -S is present
+ (default: "$N")
+  -s stats      The stats to be matched. If more than one stat is given,
+                units should be the same because the data is plotted as a
+                histogram. Use the predefined stat sets (see buildstats.sh -h
+                for all options) or any other defined (build)stat, separated
+                by colons, i.e. stime:utime
+ (default: "$STATS")
+ -S Sum values for a particular stat for found recipes
+ -o Output data file.
+ (default: "$OUTDATA_FILE")
+ -h Display this help message
+EOM
+}
+
+# Parse and validate arguments
+while getopts "b:n:s:o:Sh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ n)
+ N="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ S)
+ SUM="y"
+ ;;
+ o)
+ OUTDATA_FILE="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# Get number of stats
+IFS=':'; statsarray=(${STATS}); unset IFS
+nstats=${#statsarray[@]}
+
+# Get script folder, use to run buildstats.sh
+CD=$(dirname $0)
+
+# Parse buildstats recipes to produce a single table
+OUTBUILDSTATS="$PWD/buildstats.log"
+$CD/buildstats.sh -H -s "$STATS" > $OUTBUILDSTATS
+
+# Get headers
+HEADERS=$(cat $OUTBUILDSTATS | sed -n -e '1s/ /-/g' -e '1s/:/ /gp')
+
+echo -e "set boxwidth 0.9 relative"
+echo -e "set style data histograms"
+echo -e "set style fill solid 1.0 border lt -1"
+echo -e "set xtics rotate by 45 right"
+
+# Get output data
+if [ -z "$SUM" ]; then
+ cat $OUTBUILDSTATS | sed -e '1d' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
+ # include task at recipe column
+ sed -i -e "1i\
+${HEADERS}" $OUTDATA_FILE
+ echo -e "set title \"Top task/recipes\""
+ echo -e "plot for [COL=3:`expr 3 + ${nstats} - 1`] '${OUTDATA_FILE}' using COL:xtic(stringcolumn(1).' '.stringcolumn(2)) title columnheader(COL)"
+else
+
+    # Construct the datamash sum arguments (sum 3 sum 4 ...)
+ declare -a sumargs
+ j=0
+ for i in `seq $nstats`; do
+ sumargs[j]=sum; j=$(( $j + 1 ))
+ sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
+ done
+
+ # Do the processing with datamash
+ cat $OUTBUILDSTATS | sed -e '1d' | datamash -t ' ' -g1 ${sumargs[*]} | sort -k2 -n -r > $OUTDATA_FILE
+
+    # Include headers in the resulting file, so we can use them for gnuplot xtics
+ HEADERS=$(echo $HEADERS | sed -e 's/recipe//1')
+ sed -i -e "1i\
+${HEADERS}" $OUTDATA_FILE
+
+ # Plot
+ echo -e "set title \"Sum stats values per task for all recipes\""
+ echo -e "plot for [COL=2:`expr 2 + ${nstats} - 1`] '${OUTDATA_FILE}' using COL:xtic(1) title columnheader(COL)"
+fi
+
diff --git a/poky/scripts/contrib/bb-perf/buildstats.sh b/poky/scripts/contrib/bb-perf/buildstats.sh
new file mode 100755
index 000000000..8d7e2488f
--- /dev/null
+++ b/poky/scripts/contrib/bb-perf/buildstats.sh
@@ -0,0 +1,155 @@
+#!/bin/bash
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+# Given 'buildstats' data (generated by bitbake when setting
+# USER_CLASSES ?= "buildstats" in local.conf), task names and stat values
+# (these are the ones present in the buildstats files), outputs
+# '<task> <recipe> <value_1> <value_2> ... <value_n>'. The units are the ones
+# defined by buildstats, which in turn takes its data from /proc/[pid] files.
+#
+# Some useful pipelines
+#
+# 1. Tasks with largest stime (Amount of time that this process has been scheduled
+# in kernel mode) values
+# $ buildstats.sh -b <buildstats> -s stime | sort -k3 -n -r | head
+#
+# 2. Min, max, sum utime (Amount of time that this process has been scheduled
+#    in user mode) per task (needs GNU datamash)
+# $ buildstats.sh -b <buildstats> -s utime | datamash -t' ' -g1 min 3 max 3 sum 3 | sort -k4 -n -r
+#
+# AUTHORS
+# Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
+#
+
+# Stats, by type
+TIME="utime:stime:cutime:cstime"
+IO="IO wchar:IO write_bytes:IO syscr:IO read_bytes:IO rchar:IO syscw:IO cancelled_write_bytes"
+RUSAGE="rusage ru_utime:rusage ru_stime:rusage ru_maxrss:rusage ru_minflt:rusage ru_majflt:\
+rusage ru_inblock:rusage ru_oublock:rusage ru_nvcsw:rusage ru_nivcsw"
+
+CHILD_RUSAGE="Child rusage ru_utime:Child rusage ru_stime:Child rusage ru_maxrss:Child rusage ru_minflt:\
+Child rusage ru_majflt:Child rusage ru_inblock:Child rusage ru_oublock:Child rusage ru_nvcsw:\
+Child rusage ru_nivcsw"
+
+BS_DIR="tmp/buildstats"
+TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
+STATS="$TIME"
+HEADER="" # No header by default
+
+function usage {
+CMD=$(basename $0)
+cat <<EOM
+Usage: $CMD [-b buildstats_dir] [-t tasks] [-s stats] [-H] [-h]
+ -b buildstats The path where the folder resides
+ (default: "$BS_DIR")
+ -t tasks The tasks to be computed
+ (default: "$TASKS")
+ -s stats The stats to be matched. Options: TIME, IO, RUSAGE, CHILD_RUSAGE
+ or any other defined buildstat separated by colons, i.e. stime:utime
+ (default: "$STATS")
+ Default stat sets:
+ TIME=$TIME
+ IO=$IO
+ RUSAGE=$RUSAGE
+ CHILD_RUSAGE=$CHILD_RUSAGE
+ -h Display this help message
+EOM
+}
+
+# Parse and validate arguments
+while getopts "b:t:s:Hh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ t)
+ TASKS="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ H)
+ HEADER="y"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# Ensure the buildstats folder exists
+if [ ! -d "$BS_DIR" ]; then
+ echo "ERROR: $BS_DIR does not exist"
+ usage
+ exit 1
+fi
+
+stats=""
+IFS=":"
+for stat in ${STATS}; do
+ case $stat in
+ TIME)
+ stats="${stats}:${TIME}"
+ ;;
+ IO)
+ stats="${stats}:${IO}"
+ ;;
+ RUSAGE)
+ stats="${stats}:${RUSAGE}"
+ ;;
+ CHILD_RUSAGE)
+ stats="${stats}:${CHILD_RUSAGE}"
+ ;;
+ *)
+ stats="${STATS}"
+ esac
+done
+
+# remove possible colon at the beginning
+stats="$(echo "$stats" | sed -e 's/^://1')"
+
+# Provide a header if required by the user
+[ -n "$HEADER" ] && { echo "task:recipe:$stats"; }
+
+for task in ${TASKS}; do
+ task="do_${task}"
+ for file in $(find ${BS_DIR} -type f -name ${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
+ recipe="$(basename $(dirname $file))"
+ times=""
+ for stat in ${stats}; do
+ [ -z "$stat" ] && { echo "empty stats"; }
+ time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
+ # in case the stat is not present, set the value as NA
+ [ -z "$time" ] && { time="NA"; }
+ # Append it to times
+ if [ -z "$times" ]; then
+ times="${time}"
+ else
+ times="${times} ${time}"
+ fi
+ done
+ echo "${task} ${recipe} ${times}"
+ done
+done
diff --git a/poky/scripts/contrib/bbvars.py b/poky/scripts/contrib/bbvars.py
new file mode 100755
index 000000000..286b5a940
--- /dev/null
+++ b/poky/scripts/contrib/bbvars.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python3
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010
+
+
+import sys
+import getopt
+import os
+import os.path
+import re
+
+# Set up sys.path to let us import tinfoil
+scripts_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+lib_path = scripts_path + '/lib'
+sys.path.insert(0, lib_path)
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+import bb.tinfoil
+
+def usage():
+ print('Usage: %s -d FILENAME [-d FILENAME]* [-t FILENAME] [-T]' % os.path.basename(sys.argv[0]))
+ print(' -d FILENAME documentation file to search')
+ print(' -h, --help display this help and exit')
+ print(' -t FILENAME documentation config file (for doc tags)')
+ print(' -T Only display variables with doc tags (requires -t)')
+
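+# Example invocation (illustrative paths):
+# bbvars.py -d documentation/ref-manual/ref-variables.xml -t meta/conf/documentation.conf -T
+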
+def bbvar_is_documented(var, documented_vars):
+ ''' Check if variable (var) is in the list of documented variables (documented_vars) '''
+ return var in documented_vars
+
+def collect_documented_vars(docfiles):
+ ''' Walk the docfiles and collect the documented variables '''
+ documented_vars = []
+ var_prog = re.compile('<glossentry id=\'var-(.*)\'>')
+ for d in docfiles:
+ with open(d) as f:
+ documented_vars += var_prog.findall(f.read())
+
+ return documented_vars
+
+def bbvar_doctag(var, docconf):
+ prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
+ if docconf == "":
+ return "?"
+
+ try:
+ f = open(docconf)
+ except IOError as err:
+ return err.args[1]
+
+ # Use a context manager so the file is closed even when returning early on a match
+ with f:
+ for line in f:
+ m = prog.search(line)
+ if m:
+ return m.group(1)
+
+ return ""
+
+def main():
+ docfiles = []
+ bbvars = set()
+ undocumented = []
+ docconf = ""
+ onlydoctags = False
+
+ # Collect and validate input
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "d:hm:t:T", ["help"])
+ except getopt.GetoptError as err:
+ print('%s' % str(err))
+ usage()
+ sys.exit(2)
+
+ for o, a in opts:
+ if o in ('-h', '--help'):
+ usage()
+ sys.exit(0)
+ elif o == '-d':
+ if os.path.isfile(a):
+ docfiles.append(a)
+ else:
+ print('ERROR: documentation file %s is not a regular file' % a)
+ sys.exit(3)
+ elif o == "-t":
+ if os.path.isfile(a):
+ docconf = a
+ elif o == "-T":
+ onlydoctags = True
+ else:
+ assert False, "unhandled option"
+
+ if len(docfiles) == 0:
+ print('ERROR: no docfile specified')
+ usage()
+ sys.exit(5)
+
+ if onlydoctags and docconf == "":
+ print('ERROR: no docconf specified')
+ usage()
+ sys.exit(7)
+
+ prog = re.compile("^[^a-z]*$")
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+ parser = bb.codeparser.PythonParser('parser', None)
+ datastore = tinfoil.config_data
+
+ def bbvars_update(data):
+ if prog.match(data):
+ bbvars.add(data)
+ if tinfoil.config_data.getVarFlag(data, 'python'):
+ try:
+ parser.parse_python(tinfoil.config_data.getVar(data))
+ except bb.data_smart.ExpansionError:
+ pass
+ for var in parser.references:
+ if prog.match(var):
+ bbvars.add(var)
+ else:
+ try:
+ expandedVar = datastore.expandWithRefs(datastore.getVar(data, False), data)
+ for var in expandedVar.references:
+ if prog.match(var):
+ bbvars.add(var)
+ except bb.data_smart.ExpansionError:
+ pass
+
+ # Use tinfoil to collect all the variable names globally
+ for data in datastore:
+ bbvars_update(data)
+
+ # Collect variables from all recipes
+ for recipe in tinfoil.all_recipe_files(variants=False):
+ print("Checking %s" % recipe)
+ for data in tinfoil.parse_recipe_file(recipe):
+ bbvars_update(data)
+
+ documented_vars = collect_documented_vars(docfiles)
+
+ # Check each var for documentation
+ varlen = 0
+ for v in bbvars:
+ if len(v) > varlen:
+ varlen = len(v)
+ if not bbvar_is_documented(v, documented_vars):
+ undocumented.append(v)
+ undocumented.sort()
+ varlen = varlen + 1
+
+ # Report all undocumented variables
+ print('Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars)))
+ header = '%s%s' % (str("VARIABLE").ljust(varlen), str("DOCTAG").ljust(7))
+ print(header)
+ print(str("").ljust(len(header), '='))
+ for v in undocumented:
+ doctag = bbvar_doctag(v, docconf)
+ if not onlydoctags or not doctag == "":
+ print('%s%s' % (v.ljust(varlen), doctag))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/contrib/build-perf-test-wrapper.sh b/poky/scripts/contrib/build-perf-test-wrapper.sh
new file mode 100755
index 000000000..19bee1dd0
--- /dev/null
+++ b/poky/scripts/contrib/build-perf-test-wrapper.sh
@@ -0,0 +1,239 @@
+#!/bin/bash
+#
+# Build performance test script wrapper
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+#
+# This script is a simple wrapper around the actual build performance tester
+# script. This script initializes the build environment, runs
+# oe-build-perf-test and archives the results.
+
+script=`basename $0`
+script_dir=$(realpath $(dirname $0))
+archive_dir=~/perf-results/archives
+
+usage () {
+cat << EOF
+Usage: $script [-h] [-a ARCHIVE_DIR] [-c COMMITISH] [-C GIT_REPO] [-E EMAIL_ADDR]
+ [-P GIT_REMOTE] [-R DEST] [-w WORK_DIR] [-x]
+
+Optional arguments:
+ -h show this help and exit.
+ -a ARCHIVE_DIR archive results tarball here, give an empty string to
+ disable tarball archiving (default: $archive_dir)
+ -c COMMITISH test (checkout) this commit, <branch>:<commit> can be
+ specified to test specific commit of certain branch
+ -C GIT_REPO commit results into Git
+ -E EMAIL_ADDR send email report
+ -P GIT_REMOTE push results to a remote Git repository
+ -R DEST rsync reports to a remote destination
+ -w WORK_DIR work dir for this script
+ (default: GIT_TOP_DIR/build-perf-test)
+ -x create xml report (instead of json)
+EOF
+}
+
+get_os_release_var () {
+ ( source /etc/os-release; eval echo '$'$1 )
+}
+
+
+# Parse command line arguments
+commitish=""
+oe_build_perf_test_extra_opts=()
+oe_git_archive_extra_opts=()
+while getopts "ha:c:C:E:P:R:w:x" opt; do
+ case $opt in
+ h) usage
+ exit 0
+ ;;
+ a) archive_dir=`realpath -s "$OPTARG"`
+ ;;
+ c) commitish=$OPTARG
+ ;;
+ C) results_repo=`realpath -s "$OPTARG"`
+ ;;
+ E) email_to="$OPTARG"
+ ;;
+ P) oe_git_archive_extra_opts+=("--push" "$OPTARG")
+ ;;
+ R) rsync_dst="$OPTARG"
+ ;;
+ w) base_dir=`realpath -s "$OPTARG"`
+ ;;
+ x) oe_build_perf_test_extra_opts+=("--xml")
+ ;;
+ *) usage
+ exit 1
+ ;;
+ esac
+done
+
+# Check positional args
+shift "$((OPTIND - 1))"
+if [ $# -ne 0 ]; then
+ echo "ERROR: No positional args are accepted."
+ usage
+ exit 1
+fi
+
+# Open a file descriptor for flock and acquire lock
+LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock"
+if ! exec 3> "$LOCK_FILE"; then
+ echo "ERROR: Unable to open lock file"
+ exit 1
+fi
+if ! flock -n 3; then
+ echo "ERROR: Another instance of this script is running"
+ exit 1
+fi
+
+echo "Running on `uname -n`"
+if ! git_topdir=$(git rev-parse --show-toplevel); then
+ echo "The current working dir doesn't seem to be a git clone. Please cd there before running `basename $0`"
+ exit 1
+fi
+
+cd "$git_topdir"
+
+if [ -n "$commitish" ]; then
+ echo "Running git fetch"
+ git fetch &> /dev/null
+ git checkout HEAD^0 &> /dev/null
+
+ # Handle <branch>:<commit> format
+ if echo "$commitish" | grep -q ":"; then
+ commit=`echo "$commitish" | cut -d":" -f2`
+ branch=`echo "$commitish" | cut -d":" -f1`
+ else
+ commit="$commitish"
+ branch="$commitish"
+ fi
+
+ echo "Checking out $commitish"
+ git branch -D $branch &> /dev/null
+ if ! git checkout -f $branch &> /dev/null; then
+ echo "ERROR: Git checkout failed"
+ exit 1
+ fi
+
+ # Check that the specified branch really contains the commit
+ commit_hash=`git rev-parse --revs-only $commit --`
+ if [ -z "$commit_hash" -o "`git merge-base $branch $commit`" != "$commit_hash" ]; then
+ echo "ERROR: branch $branch does not contain commit $commit"
+ exit 1
+ fi
+ git reset --hard $commit > /dev/null
+fi
+
+# Determine name of the current branch
+branch=`git symbolic-ref HEAD 2> /dev/null`
+# Strip refs/heads/
+branch=${branch:11}
+
+# Setup build environment
+if [ -z "$base_dir" ]; then
+ base_dir="$git_topdir/build-perf-test"
+fi
+echo "Using working dir $base_dir"
+
+timestamp=`date "+%Y%m%d%H%M%S"`
+git_rev=$(git rev-parse --short HEAD) || exit 1
+build_dir="$base_dir/build-$git_rev-$timestamp"
+results_dir="$base_dir/results-$git_rev-$timestamp"
+globalres_log="$base_dir/globalres.log"
+machine="qemux86"
+
+mkdir -p "$base_dir"
+source ./oe-init-build-env $build_dir >/dev/null || exit 1
+
+# Additional config
+auto_conf="$build_dir/conf/auto.conf"
+echo "MACHINE = \"$machine\"" > "$auto_conf"
+echo 'BB_NUMBER_THREADS = "8"' >> "$auto_conf"
+echo 'PARALLEL_MAKE = "-j 8"' >> "$auto_conf"
+echo "DL_DIR = \"$base_dir/downloads\"" >> "$auto_conf"
+# Disabling network sanity check slightly reduces the variance of timing results
+echo 'CONNECTIVITY_CHECK_URIS = ""' >> "$auto_conf"
+# Possibility to define extra settings
+if [ -f "$base_dir/auto.conf.extra" ]; then
+ cat "$base_dir/auto.conf.extra" >> "$auto_conf"
+fi
+
+# Run actual test script
+oe-build-perf-test --out-dir "$results_dir" \
+ --globalres-file "$globalres_log" \
+ "${oe_build_perf_test_extra_opts[@]}" \
+ --lock-file "$base_dir/oe-build-perf.lock"
+
+case $? in
+ 1) echo "ERROR: oe-build-perf-test script failed!"
+ exit 1
+ ;;
+ 2) echo "NOTE: some tests failed!"
+ ;;
+esac
+
+# Commit results to git
+if [ -n "$results_repo" ]; then
+ echo -e "\nArchiving results in $results_repo"
+ oe-git-archive \
+ --git-dir "$results_repo" \
+ --branch-name "{hostname}/{branch}/{machine}" \
+ --tag-name "{hostname}/{branch}/{machine}/{commit_count}-g{commit}/{tag_number}" \
+ --exclude "buildstats.json" \
+ --notes "buildstats/{branch_name}" "$results_dir/buildstats.json" \
+ "${oe_git_archive_extra_opts[@]}" \
+ "$results_dir"
+
+ # Generate test reports
+ sanitized_branch=`echo $branch | tr / _`
+ report_txt=`hostname`_${sanitized_branch}_${machine}.txt
+ report_html=`hostname`_${sanitized_branch}_${machine}.html
+ echo -e "\nGenerating test report"
+ oe-build-perf-report -r "$results_repo" > $report_txt
+ oe-build-perf-report -r "$results_repo" --html > $report_html
+
+ # Send email report
+ if [ -n "$email_to" ]; then
+ echo "Emailing test report"
+ os_name=`get_os_release_var PRETTY_NAME`
+ "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt --html $report_html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
+ fi
+
+ # Upload report files, unless we're on detached head
+ if [ -n "$rsync_dst" -a -n "$branch" ]; then
+ echo "Uploading test report"
+ rsync $report_txt $report_html $rsync_dst
+ fi
+fi
+
+
+echo -ne "\n\n-----------------\n"
+echo "Global results file:"
+echo -ne "\n"
+
+cat "$globalres_log"
+
+if [ -n "$archive_dir" ]; then
+ echo -ne "\n\n-----------------\n"
+ echo "Archiving results in $archive_dir"
+ mkdir -p "$archive_dir"
+ results_basename=`basename "$results_dir"`
+ results_dirname=`dirname "$results_dir"`
+ tar -czf "$archive_dir/`uname -n`-${results_basename}.tar.gz" -C "$results_dirname" "$results_basename"
+fi
+
+rm -rf "$build_dir"
+rm -rf "$results_dir"
+
+echo "DONE"
diff --git a/poky/scripts/contrib/build-perf-test.sh b/poky/scripts/contrib/build-perf-test.sh
new file mode 100755
index 000000000..9a091edb0
--- /dev/null
+++ b/poky/scripts/contrib/build-perf-test.sh
@@ -0,0 +1,400 @@
+#!/bin/bash
+#
+# This script runs a series of tests (with and without sstate) and reports build time (and tmp/ size)
+#
+# Build performance test script
+#
+# Copyright 2013 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# AUTHORS:
+# Stefan Stanacar <stefanx.stanacar@intel.com>
+
+
+ME=$(basename $0)
+
+#
+# usage and setup
+#
+
+usage () {
+cat << EOT
+Usage: $ME [-h]
+ $ME [-c <commit>] [-v] [-m <val>] [-j <val>] [-t <val>] [-i <image-name>] [-d <path>]
+Options:
+ -h
+ Display this help and exit.
+ -c <commit>
+ git checkout <commit> before anything else
+ -v
+ Show bitbake output, don't redirect it to a log.
+ -m <machine>
+ Value for MACHINE. Default is qemux86.
+ -j <val>
+ Value for PARALLEL_MAKE. Default is 8.
+ -t <val>
+ Value for BB_NUMBER_THREADS. Default is 8.
+ -i <image-name>
+ Instead of timing against core-image-sato, use <image-name>
+ -d <path>
+ Use <path> as DL_DIR
+ -p <githash>
+ Cherry pick githash onto the commit
+
+Note: current working directory must be inside a poky git clone.
+
+EOT
+}
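+
+# Example (illustrative):
+# ./scripts/contrib/build-perf-test.sh -m qemux86 -j 8 -t 8 -i core-image-sato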
+
+
+if clonedir=$(git rev-parse --show-toplevel); then
+ cd $clonedir
+else
+ echo "The current working dir doesn't seem to be a poky git clone. Please cd there before running $ME"
+ exit 1
+fi
+
+IMAGE="core-image-sato"
+verbose=0
+dldir=
+commit=
+pmake=
+cherrypicks=
+while getopts "hvc:m:j:t:i:d:p:" opt; do
+ case $opt in
+ h) usage
+ exit 0
+ ;;
+ v) verbose=1
+ ;;
+ c) commit=$OPTARG
+ ;;
+ m) export MACHINE=$OPTARG
+ ;;
+ j) pmake=$OPTARG
+ ;;
+ t) export BB_NUMBER_THREADS=$OPTARG
+ ;;
+ i) IMAGE=$OPTARG
+ ;;
+ d) dldir=$OPTARG
+ ;;
+ p) cherrypicks="$cherrypicks $OPTARG"
+ ;;
+ *) usage
+ exit 1
+ ;;
+ esac
+done
+
+
+#drop cached credentials and test for sudo access without a password
+sudo -k -n ls > /dev/null 2>&1
+reqpass=$?
+if [ $reqpass -ne 0 ]; then
+ echo "The script requires sudo access to drop caches between builds (echo 3 > /proc/sys/vm/drop_caches)"
+ read -s -p "Please enter your sudo password: " pass
+ echo
+fi
+
+if [ -n "$commit" ]; then
+ echo "git checkout -f $commit"
+ git pull > /dev/null 2>&1
+ git checkout -f $commit || exit 1
+ git pull > /dev/null 2>&1
+fi
+
+if [ -n "$cherrypicks" ]; then
+ for c in $cherrypicks; do
+ git cherry-pick $c
+ done
+fi
+
+rev=$(git rev-parse --short HEAD) || exit 1
+OUTDIR="$clonedir/build-perf-test/results-$rev-`date "+%Y%m%d%H%M%S"`"
+BUILDDIR="$OUTDIR/build"
+resultsfile="$OUTDIR/results.log"
+cmdoutput="$OUTDIR/commands.log"
+myoutput="$OUTDIR/output.log"
+globalres="$clonedir/build-perf-test/globalres.log"
+
+mkdir -p $OUTDIR || exit 1
+
+log () {
+ local msg="$1"
+ echo "`date`: $msg" | tee -a $myoutput
+}
+
+
+#
+# Config stuff
+#
+
+branch=`git branch 2>&1 | grep "^* " | tr -d "* "`
+gitcommit=$(git rev-parse HEAD) || exit 1
+log "Running on $branch:$gitcommit"
+
+source ./oe-init-build-env $OUTDIR/build >/dev/null || exit 1
+cd $OUTDIR/build
+
+[ -n "$MACHINE" ] || export MACHINE="qemux86"
+[ -n "$BB_NUMBER_THREADS" ] || export BB_NUMBER_THREADS="8"
+
+if [ -n "$pmake" ]; then
+ export PARALLEL_MAKE="-j $pmake"
+else
+ export PARALLEL_MAKE="-j 8"
+fi
+
+if [ -n "$dldir" ]; then
+ echo "DL_DIR = \"$dldir\"" >> conf/local.conf
+else
+ echo "DL_DIR = \"$clonedir/build-perf-test/downloads\"" >> conf/local.conf
+fi
+
+# Sometimes I've noticed big differences in timings for the same commit, on the same machine
+# Disabling the network sanity check helps a bit (because of my crappy network connection and/or proxy)
+echo "CONNECTIVITY_CHECK_URIS =\"\"" >> conf/local.conf
+
+
+#
+# Functions
+#
+
+declare -a TIMES
+time_count=0
+declare -a SIZES
+size_count=0
+
+time_cmd () {
+ log " Timing: $*"
+
+ if [ $verbose -eq 0 ]; then
+ /usr/bin/time -v -o $resultsfile "$@" >> $cmdoutput
+ else
+ /usr/bin/time -v -o $resultsfile "$@"
+ fi
+ ret=$?
+ if [ $ret -eq 0 ]; then
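+ # /usr/bin/time -v reports e.g. "Elapsed (wall clock) time (h:mm:ss or m:ss): 1:23.45"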
+ t=`grep wall $resultsfile | sed 's/.*m:ss): //'`
+ log " TIME: $t"
+ TIMES[(( time_count++ ))]="$t"
+ else
+ log "ERROR: exit status was non-zero, will report time as 0."
+ TIMES[(( time_count++ ))]="0"
+ fi
+
+ # time overwrites the output file by default and we want to keep the results;
+ # it has an append option, but we don't want to clobber all runs in one file
+ i=`ls $OUTDIR/results.log* |wc -l`
+ mv $resultsfile "${resultsfile}.${i}"
+ log "More stats can be found in ${resultsfile}.${i}"
+}
+
+bbtime () {
+ time_cmd bitbake "$@"
+}
+
+#we don't time bitbake here
+bbnotime () {
+ local arg="$@"
+ log " Running: bitbake ${arg}"
+ if [ $verbose -eq 0 ]; then
+ bitbake ${arg} >> $cmdoutput
+ else
+ bitbake ${arg}
+ fi
+ ret=$?
+ if [ $ret -eq 0 ]; then
+ log " Finished bitbake ${arg}"
+ else
+ log "ERROR: exit status was non-zero. Exit.."
+ exit $ret
+ fi
+
+}
+
+do_rmtmp() {
+ log " Removing tmp"
+ rm -rf bitbake.lock pseudodone conf/sanity_info cache tmp
+}
+do_rmsstate () {
+ log " Removing sstate-cache"
+ rm -rf sstate-cache
+}
+do_sync () {
+ log " Syncing and dropping caches"
+ sync; sync
+ if [ $reqpass -eq 0 ]; then
+ sudo sh -c "echo 3 > /proc/sys/vm/drop_caches"
+ else
+ echo "$pass" | sudo -S sh -c "echo 3 > /proc/sys/vm/drop_caches"
+ echo
+ fi
+ sleep 3
+}
+
+write_results() {
+ echo -n "`uname -n`,$branch:$gitcommit,`git describe`," >> $globalres
+ for i in "${TIMES[@]}"; do
+ echo -n "$i," >> $globalres
+ done
+ for i in "${SIZES[@]}"; do
+ echo -n "$i," >> $globalres
+ done
+ echo >> $globalres
+ sed -i '$ s/,$//' $globalres
+}
+
+####
+
+#
+# Test 1
+# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir (w/o rm_work and w/ rm_work)
+# Pre: Downloaded sources, no sstate
+# Steps:
+# Part1:
+# - fetchall
+# - clean build dir
+# - time bitbake core-image-sato
+# - collect data
+# Part2:
+# - bitbake virtual/kernel -c cleansstate
+# - time bitbake virtual/kernel
+# Part3:
+# - add INHERIT to local.conf
+# - clean build dir
+# - build
+# - report size, remove INHERIT
+
+test1_p1 () {
+ log "Running Test 1, part 1/3: Measure wall clock of bitbake $IMAGE and size of tmp/ dir"
+ bbnotime $IMAGE --runall=fetch
+ do_rmtmp
+ do_rmsstate
+ do_sync
+ bbtime $IMAGE
+ s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'`
+ SIZES[(( size_count++ ))]="$s"
+ log "SIZE of tmp dir is: $s"
+ log "Buildstats are saved in $OUTDIR/buildstats-test1"
+ mv tmp/buildstats $OUTDIR/buildstats-test1
+}
+
+
+test1_p2 () {
+ log "Running Test 1, part 2/3: bitbake virtual/kernel -c cleansstate and time bitbake virtual/kernel"
+ bbnotime virtual/kernel -c cleansstate
+ do_sync
+ bbtime virtual/kernel
+}
+
+test1_p3 () {
+ log "Running Test 1, part 3/3: Build $IMAGE w/o sstate and report size of tmp/dir with rm_work enabled"
+ echo "INHERIT += \"rm_work\"" >> conf/local.conf
+ do_rmtmp
+ do_rmsstate
+ do_sync
+ bbtime $IMAGE
+ sed -i 's/INHERIT += \"rm_work\"//' conf/local.conf
+ s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'`
+ SIZES[(( size_count++ ))]="$s"
+ log "SIZE of tmp dir is: $s"
+ log "Buildstats are saved in $OUTDIR/buildstats-test13"
+ mv tmp/buildstats $OUTDIR/buildstats-test13
+}
+
+
+#
+# Test 2
+# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir
+# Pre: populated sstate cache
+
+test2 () {
+ # Assuming test 1 has run
+ log "Running Test 2: Measure wall clock of bitbake $IMAGE -c rootfs with sstate"
+ do_rmtmp
+ do_sync
+ bbtime $IMAGE -c rootfs
+}
+
+
+# Test 3
+# parsing time metrics
+#
+# Start with
+# i) "rm -rf tmp/cache; time bitbake -p"
+# ii) "rm -rf tmp/cache/default-glibc/; time bitbake -p"
+# iii) "time bitbake -p"
+
+
+test3 () {
+ log "Running Test 3: Parsing time metrics (bitbake -p)"
+ log " Removing tmp/cache && cache"
+ rm -rf tmp/cache cache
+ bbtime -p
+ log " Removing tmp/cache/default-glibc/"
+ rm -rf tmp/cache/default-glibc/
+ bbtime -p
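+ # Third parse runs with fully warm caches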
+ bbtime -p
+}
+
+#
+# Test 4 - eSDK
+# Measure: eSDK size and installation time
+test4 () {
+ log "Running Test 4: eSDK size and installation time"
+ bbnotime $IMAGE -c do_populate_sdk_ext
+
+ esdk_installer=(tmp/deploy/sdk/*-toolchain-ext-*.sh)
+
+ if [ ${#esdk_installer[*]} -eq 1 ]; then
+ s=$((`stat -c %s "$esdk_installer"` / 1024))
+ SIZES[(( size_count++ ))]="$s"
+ log "Download SIZE of eSDK is: $s kB"
+
+ do_sync
+ time_cmd "$esdk_installer" -y -d "tmp/esdk-deploy"
+
+ s=$((`du -sb "tmp/esdk-deploy" | cut -f1` / 1024))
+ SIZES[(( size_count++ ))]="$s"
+ log "Install SIZE of eSDK is: $s kB"
+ else
+ log "ERROR: other than one sdk found (${esdk_installer[*]}), reporting size and time as 0."
+ SIZES[(( size_count++ ))]="0"
+ TIMES[(( time_count++ ))]="0"
+ fi
+
+}
+
+
+# RUN!
+
+test1_p1
+test1_p2
+test1_p3
+test2
+test3
+test4
+
+# If we got this far, write to the global results
+write_results
+
+log "All done, cleaning up..."
+
+do_rmtmp
+do_rmsstate
diff --git a/poky/scripts/contrib/ddimage b/poky/scripts/contrib/ddimage
new file mode 100755
index 000000000..ab929957a
--- /dev/null
+++ b/poky/scripts/contrib/ddimage
@@ -0,0 +1,108 @@
+#!/bin/sh
+
+# Default to avoiding the first two disks on typical Linux and Mac OS installs
+# Better safe than sorry :-)
+BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/disk1 /dev/disk2"
+
+# 1MB blocksize
+BLOCKSIZE=1048576
+
+usage() {
+ echo "Usage: $(basename $0) IMAGE DEVICE"
+}
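+
+# Example (illustrative): ./ddimage core-image-minimal.hddimg /dev/sdc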
+
+image_details() {
+ IMG=$1
+ echo "Image details"
+ echo "============="
+ echo " image: $(basename $IMG)"
+ # stat format is different on Mac OS and Linux
+ if [ "$(uname)" = "Darwin" ]; then
+ echo " size: $(stat -L -f '%z bytes' $IMG)"
+ echo " modified: $(stat -L -f '%Sm' $IMG)"
+ else
+ echo " size: $(stat -L -c '%s bytes' $IMG)"
+ echo " modified: $(stat -L -c '%y' $IMG)"
+ fi
+ echo " type: $(file -L -b $IMG)"
+ echo ""
+}
+
+device_details() {
+ DEV=$1
+ BLOCK_SIZE=512
+
+ echo "Device details"
+ echo "=============="
+
+ # Collect disk info using diskutil on Mac OS
+ if [ "$(uname)" = "Darwin" ]; then
+ diskutil info $DEVICE | egrep "(Device Node|Media Name|Total Size)"
+ return
+ fi
+
+ # Default / Linux information collection
+ echo " device: $DEVICE"
+ if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
+ echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
+ else
+ echo " vendor: UNKOWN"
+ fi
+ if [ -f "/sys/class/block/$DEV/device/model" ]; then
+ echo " model: $(cat /sys/class/block/$DEV/device/model)"
+ else
+ echo " model: UNKNOWN"
+ fi
+ if [ -f "/sys/class/block/$DEV/size" ]; then
+ echo " size: $(($(cat /sys/class/block/$DEV/size) * $BLOCK_SIZE)) bytes"
+ else
+ echo " size: UNKNOWN"
+ fi
+ echo ""
+}
+
+if [ $# -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+IMAGE=$1
+DEVICE=$2
+
+if [ ! -e "$IMAGE" ]; then
+ echo "ERROR: Image $IMAGE does not exist"
+ usage
+ exit 1
+fi
+
+
+for i in ${BLACKLIST_DEVICES}; do
+ if [ "$i" = "$DEVICE" ]; then
+ echo "ERROR: Device $DEVICE is blacklisted"
+ exit 1
+ fi
+done
+
+if [ ! -w "$DEVICE" ]; then
+ echo "ERROR: Device $DEVICE does not exist or is not writable"
+ usage
+ exit 1
+fi
+
+image_details $IMAGE
+device_details $(basename $DEVICE)
+
+printf "Write $IMAGE to $DEVICE [y/N]? "
+read RESPONSE
+if [ "$RESPONSE" != "y" ]; then
+ echo "Write aborted"
+ exit 0
+fi
+
+echo "Writing image..."
+if which pv >/dev/null 2>&1; then
+ pv "$IMAGE" | dd of="$DEVICE" bs="$BLOCKSIZE"
+else
+ dd if="$IMAGE" of="$DEVICE" bs="$BLOCKSIZE"
+fi
+sync
diff --git a/poky/scripts/contrib/devtool-stress.py b/poky/scripts/contrib/devtool-stress.py
new file mode 100755
index 000000000..d555c51a6
--- /dev/null
+++ b/poky/scripts/contrib/devtool-stress.py
@@ -0,0 +1,256 @@
+#!/usr/bin/env python3
+
+# devtool stress tester
+#
+# Written by: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# Copyright 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
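+# Example invocations (illustrative):
+# devtool-stress.py extract
+# devtool-stress.py -o 'zlib,openssl*' modify
+#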
+
+import sys
+import os
+import os.path
+import subprocess
+import re
+import argparse
+import logging
+import tempfile
+import shutil
+import signal
+import fnmatch
+
+scripts_lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib'))
+sys.path.insert(0, scripts_lib_path)
+import scriptutils
+import argparse_oe
+logger = scriptutils.logger_create('devtool-stress')
+
+def select_recipes(args):
+ import bb.tinfoil
+ tinfoil = bb.tinfoil.Tinfoil()
+ tinfoil.prepare(False)
+
+ pkg_pn = tinfoil.cooker.recipecaches[''].pkg_pn
+ (latest_versions, preferred_versions) = bb.providers.findProviders(tinfoil.config_data, tinfoil.cooker.recipecaches[''], pkg_pn)
+
+ skip_classes = args.skip_classes.split(',')
+
+ recipelist = []
+ for pn in sorted(pkg_pn):
+ pref = preferred_versions[pn]
+ inherits = [os.path.splitext(os.path.basename(f))[0] for f in tinfoil.cooker.recipecaches[''].inherits[pref[1]]]
+ for cls in skip_classes:
+ if cls in inherits:
+ break
+ else:
+ recipelist.append(pn)
+
+ tinfoil.shutdown()
+
+ resume_from = args.resume_from
+ if resume_from:
+ if not resume_from in recipelist:
+ print('%s is not a testable recipe' % resume_from)
+ return 1
+ if args.only:
+ only = args.only.split(',')
+ for onlyitem in only:
+ for pn in recipelist:
+ if fnmatch.fnmatch(pn, onlyitem):
+ break
+ else:
+ print('%s does not match any testable recipe' % onlyitem)
+ return 1
+ else:
+ only = None
+ if args.skip:
+ skip = args.skip.split(',')
+ else:
+ skip = []
+
+ recipes = []
+ for pn in recipelist:
+ if resume_from:
+ if pn == resume_from:
+ resume_from = None
+ else:
+ continue
+
+ if args.only:
+ for item in only:
+ if fnmatch.fnmatch(pn, item):
+ break
+ else:
+ continue
+
+ skipit = False
+ for item in skip:
+ if fnmatch.fnmatch(pn, item):
+ skipit = True
+ if skipit:
+ continue
+
+ recipes.append(pn)
+
+ return recipes
+
+
+def stress_extract(args):
+ import bb.process
+
+ recipes = select_recipes(args)
+
+ failures = 0
+ tmpdir = tempfile.mkdtemp()
+ os.setpgrp()
+ try:
+ for pn in recipes:
+ sys.stdout.write('Testing %s ' % (pn + ' ').ljust(40, '.'))
+ sys.stdout.flush()
+ failed = False
+ skipped = None
+
+ srctree = os.path.join(tmpdir, pn)
+ try:
+ bb.process.run('devtool extract %s %s' % (pn, srctree))
+ except bb.process.ExecutionError as exc:
+ if exc.exitcode == 4:
+ skipped = 'incompatible'
+ else:
+ failed = True
+ with open('stress_%s_extract.log' % pn, 'w') as f:
+ f.write(str(exc))
+
+ if os.path.exists(srctree):
+ shutil.rmtree(srctree)
+
+ if failed:
+ print('failed')
+ failures += 1
+ elif skipped:
+ print('skipped (%s)' % skipped)
+ else:
+ print('ok')
+ except KeyboardInterrupt:
+ # We want any child processes killed. This is crude, but effective.
+ os.killpg(0, signal.SIGTERM)
+
+ if failures:
+ return 1
+ else:
+ return 0
+
+
+def stress_modify(args):
+ import bb.process
+
+ recipes = select_recipes(args)
+
+ failures = 0
+ tmpdir = tempfile.mkdtemp()
+ os.setpgrp()
+ try:
+ for pn in recipes:
+ sys.stdout.write('Testing %s ' % (pn + ' ').ljust(40, '.'))
+ sys.stdout.flush()
+ failed = False
+ reset = True
+ skipped = None
+
+ srctree = os.path.join(tmpdir, pn)
+ try:
+ bb.process.run('devtool modify -x %s %s' % (pn, srctree))
+ except bb.process.ExecutionError as exc:
+ if exc.exitcode == 4:
+ skipped = 'incompatible'
+ else:
+ with open('stress_%s_modify.log' % pn, 'w') as f:
+ f.write(str(exc))
+ failed = 'modify'
+ reset = False
+
+ if not skipped:
+ if not failed:
+ try:
+ bb.process.run('bitbake -c install %s' % pn)
+ except bb.process.CmdError as exc:
+ with open('stress_%s_install.log' % pn, 'w') as f:
+ f.write(str(exc))
+ failed = 'build'
+ if reset:
+ try:
+ bb.process.run('devtool reset %s' % pn)
+ except bb.process.CmdError as exc:
+ print('devtool reset failed: %s' % str(exc))
+ break
+
+ if os.path.exists(srctree):
+ shutil.rmtree(srctree)
+
+ if failed:
+ print('failed (%s)' % failed)
+ failures += 1
+ elif skipped:
+ print('skipped (%s)' % skipped)
+ else:
+ print('ok')
+ except KeyboardInterrupt:
+ # We want any child processes killed. This is crude, but effective.
+ os.killpg(0, signal.SIGTERM)
+
+ if failures:
+ return 1
+ else:
+ return 0
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="devtool stress tester",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-r', '--resume-from', help='Resume from specified recipe', metavar='PN')
+ parser.add_argument('-o', '--only', help='Only test specified recipes (comma-separated without spaces, wildcards allowed)', metavar='PNLIST')
+ parser.add_argument('-s', '--skip', help='Skip specified recipes (comma-separated without spaces, wildcards allowed)', metavar='PNLIST', default='gcc-source-*,kernel-devsrc,package-index,perf,meta-world-pkgdata,glibc-locale,glibc-mtrace,glibc-scripts,os-release')
+ parser.add_argument('-c', '--skip-classes', help='Skip recipes inheriting specified classes (comma-separated) - default %(default)s', metavar='CLASSLIST', default='native,nativesdk,cross,cross-canadian,image,populate_sdk,meta,packagegroup')
+ subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ parser_modify = subparsers.add_parser('modify',
+ help='Run "devtool modify" followed by a build with bitbake on matching recipes',
+ description='Runs "devtool modify" followed by a build with bitbake on matching recipes')
+ parser_modify.set_defaults(func=stress_modify)
+
+ parser_extract = subparsers.add_parser('extract',
+ help='Run "devtool extract" on matching recipes',
+ description='Runs "devtool extract" on matching recipes')
+ parser_extract.set_defaults(func=stress_extract)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+
+ import scriptpath
+ bitbakepath = scriptpath.add_bitbake_lib_path()
+ if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ return 1
+ logger.debug('Found bitbake path: %s' % bitbakepath)
+
+ return args.func(args)
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/contrib/dialog-power-control b/poky/scripts/contrib/dialog-power-control
new file mode 100755
index 000000000..7550ea53b
--- /dev/null
+++ b/poky/scripts/contrib/dialog-power-control
@@ -0,0 +1,53 @@
+#!/bin/sh
+#
+# Simple script to show a manual power prompt for when you want to use
+# automated hardware testing with testimage.bbclass but you don't have a
+# web-enabled power strip or similar to do the power on/off/cycle.
+#
+# You can enable it by enabling testimage (see the Yocto Project
+# Development manual "Performing Automated Runtime Testing" section)
+# and setting the following in your local.conf:
+#
+# TEST_POWERCONTROL_CMD = "${COREBASE}/scripts/contrib/dialog-power-control"
+#
+
+PROMPT=""
+while true; do
+ case $1 in
+ on)
+ PROMPT="Please turn device power on";;
+ off)
+ PROMPT="Please turn device power off";;
+ cycle)
+ PROMPT="Please click Done, then turn the device power off then on";;
+ "")
+ break;;
+ esac
+ shift
+done
+
+if [ "$PROMPT" = "" ] ; then
+ echo "ERROR: no power action specified on command line"
+ exit 2
+fi
+
+if [ "`which kdialog 2>/dev/null`" != "" ] ; then
+ DIALOGUTIL="kdialog"
+elif [ "`which zenity 2>/dev/null`" != "" ] ; then
+ DIALOGUTIL="zenity"
+else
+ echo "ERROR: couldn't find program to display a message, install kdialog or zenity"
+ exit 3
+fi
+
+if [ "$DIALOGUTIL" = "kdialog" ] ; then
+ kdialog --yesno "$PROMPT" --title "TestImage Power Control" --yes-label "Done" --no-label "Cancel test"
+elif [ "$DIALOGUTIL" = "zenity" ] ; then
+ zenity --question --text="$PROMPT" --title="TestImage Power Control" --ok-label="Done" --cancel-label="Cancel test"
+fi
+
+if [ "$?" != "0" ] ; then
+ echo "User cancelled test at power prompt"
+ exit 1
+fi
+
diff --git a/poky/scripts/contrib/documentation-audit.sh b/poky/scripts/contrib/documentation-audit.sh
new file mode 100755
index 000000000..2144aac93
--- /dev/null
+++ b/poky/scripts/contrib/documentation-audit.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+#
+# Perform an audit of which packages provide documentation and which
+# are missing -doc packages.
+#
+# Setup requirements: be sure you are building for MACHINE=qemux86. Run
+# this script after sourcing the build environment script, so that you are
+# running it from the build/ directory.
+#
+# Maintainer: Scott Garman <scott.a.garman@intel.com>
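+#
+# Example (illustrative), from the build/ directory:
+# ../scripts/contrib/documentation-audit.sh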
+
+REPORT_DOC_SIMPLE="documentation_exists.txt"
+REPORT_DOC_DETAIL="documentation_exists_detail.txt"
+REPORT_MISSING_SIMPLE="documentation_missing.txt"
+REPORT_MISSING_DETAIL="documentation_missing_detail.txt"
+REPORT_BUILD_ERRORS="build_errors.txt"
+
+rm -rf $REPORT_DOC_SIMPLE $REPORT_DOC_DETAIL $REPORT_MISSING_SIMPLE $REPORT_MISSING_DETAIL $REPORT_BUILD_ERRORS
+
+BITBAKE=`which bitbake`
+if [ -z "$BITBAKE" ]; then
+ echo "Error: bitbake command not found."
+ echo "Did you forget to source the build environment script?"
+ exit 1
+fi
+
+echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results"
+echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or "
+echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\""
+
+for pkg in `bitbake -s | awk '{ print \$1 }'`; do
+ if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" ||
+ "$pkg" == "Recipe" ||
+ "$pkg" == "Parsing" || "$pkg" == "Package" ||
+ "$pkg" == "NOTE:" || "$pkg" == "WARNING:" ||
+ "$pkg" == "done." || "$pkg" == "===========" ]]
+ then
+ # Skip initial bitbake output
+ continue
+ fi
+ if [[ "$pkg" =~ -native$ || "$pkg" =~ -nativesdk$ ||
+ "$pkg" =~ -cross-canadian ]]; then
+ # Skip native/nativesdk/cross-canadian recipes
+ continue
+ fi
+ if [[ "$pkg" =~ ^meta- || "$pkg" =~ ^packagegroup- || "$pkg" =~ -image ]]; then
+ # Skip meta, task and image recipes
+ continue
+ fi
+ if [[ "$pkg" =~ ^glibc- || "$pkg" =~ ^libiconv$ ||
+ "$pkg" =~ -toolchain$ || "$pkg" =~ ^package-index$ ||
+ "$pkg" =~ ^linux- || "$pkg" =~ ^adt-installer$ ||
+ "$pkg" =~ ^eds-tools$ || "$pkg" =~ ^external-python-tarball$ ||
+ "$pkg" =~ ^qt4-embedded$ || "$pkg" =~ ^qt-mobility ]]; then
+ # Skip glibc, libiconv, -toolchain, and other recipes known
+ # to cause build conflicts or trigger false positives.
+ continue
+ fi
+
+ echo "Building package $pkg..."
+ bitbake $pkg > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "There was an error building package $pkg" >> "$REPORT_MISSING_DETAIL"
+ echo "$pkg" >> $REPORT_BUILD_ERRORS
+
+ # Do not skip the remaining tests, as sometimes the
+ # exit status is 1 due to QA errors, and we can still
+ # perform the -doc checks.
+ fi
+
+ echo "$pkg built successfully, checking for a documentation package..."
+ WORKDIR=`bitbake -e $pkg | grep ^WORKDIR | awk -F '=' '{ print \$2 }' | awk -F '"' '{ print \$2 }'`
+ FIND_DOC_PKG=`find $WORKDIR/packages-split/*-doc -maxdepth 0 -type d`
+ if [ -z "$FIND_DOC_PKG" ]; then
+ # No -doc package was generated:
+ echo "No -doc package: $pkg" >> "$REPORT_MISSING_DETAIL"
+ echo "$pkg" >> $REPORT_MISSING_SIMPLE
+ continue
+ fi
+
+ FIND_DOC_FILES=`find $FIND_DOC_PKG -type f`
+ if [ -z "$FIND_DOC_FILES" ]; then
+ # No files shipped with the -doc package:
+ echo "No files shipped with the -doc package: $pkg" >> "$REPORT_MISSING_DETAIL"
+ echo "$pkg" >> $REPORT_MISSING_SIMPLE
+ continue
+ fi
+
+ echo "Documentation shipped with $pkg:" >> "$REPORT_DOC_DETAIL"
+ echo "$FIND_DOC_FILES" >> "$REPORT_DOC_DETAIL"
+ echo "" >> "$REPORT_DOC_DETAIL"
+
+ echo "$pkg" >> "$REPORT_DOC_SIMPLE"
+done
diff --git a/poky/scripts/contrib/graph-tool b/poky/scripts/contrib/graph-tool
new file mode 100755
index 000000000..1df5b8c34
--- /dev/null
+++ b/poky/scripts/contrib/graph-tool
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+
+# Simple graph query utility
+# useful for getting answers from .dot files produced by bitbake -g
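+#
+# Example (illustrative):
+# bitbake -g core-image-minimal
+# graph-tool find-paths task-depends.dot quilt-native.do_install core-image-minimal.do_rootfs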
+#
+# Written by: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# Copyright 2013 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import sys
+
+def get_path_networkx(dotfile, fromnode, tonode):
+ try:
+ import networkx
+ except ImportError:
+ print('ERROR: Please install the networkx python module')
+ sys.exit(1)
+
+ graph = networkx.DiGraph(networkx.nx_pydot.read_dot(dotfile))
+ def node_missing(node):
+ import difflib
+ close_matches = difflib.get_close_matches(node, graph.nodes(), cutoff=0.7)
+ if close_matches:
+ print('ERROR: no node "%s" in graph. Close matches:\n %s' % (node, '\n '.join(close_matches)))
+ sys.exit(1)
+
+ if not fromnode in graph:
+ node_missing(fromnode)
+ if not tonode in graph:
+ node_missing(tonode)
+ return networkx.all_simple_paths(graph, source=fromnode, target=tonode)
+
+
+def find_paths(args, usage):
+ if len(args) < 3:
+ usage()
+ sys.exit(1)
+
+ fromnode = args[1]
+ tonode = args[2]
+
+ path = None
+ for path in get_path_networkx(args[0], fromnode, tonode):
+ print(" -> ".join(map(str, path)))
+ if not path:
+ print("ERROR: no path from %s to %s in graph" % (fromnode, tonode))
+ sys.exit(1)
+
+def main():
+ import optparse
+ parser = optparse.OptionParser(
+ usage = '''%prog [options] <command> <arguments>
+
+Available commands:
+ find-paths <dotfile> <from> <to>
+ Find all of the paths between two nodes in a dot graph''')
+
+ #parser.add_option("-d", "--debug",
+ # help = "Report all SRCREV values, not just ones where AUTOREV has been used",
+ # action="store_true", dest="debug", default=False)
+
+ options, args = parser.parse_args(sys.argv)
+ args = args[1:]
+
+ if len(args) < 1:
+ parser.print_help()
+ sys.exit(1)
+
+ if args[0] == "find-paths":
+ find_paths(args[1:], parser.print_help)
+ else:
+ parser.print_help()
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/contrib/list-packageconfig-flags.py b/poky/scripts/contrib/list-packageconfig-flags.py
new file mode 100755
index 000000000..7ce718624
--- /dev/null
+++ b/poky/scripts/contrib/list-packageconfig-flags.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation.
+#
+# Copyright (C) 2013 Wind River Systems, Inc.
+# Copyright (C) 2014 Intel Corporation
+#
+# - list available recipes which have PACKAGECONFIG flags
+# - list available PACKAGECONFIG flags and all affected recipes
+# - list all recipes and PACKAGECONFIG information
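+#
+# Example invocations (illustrative):
+# list-packageconfig-flags.py # recipes and their flags
+# list-packageconfig-flags.py -f # flags and the recipes they affect
+# list-packageconfig-flags.py -a -p # full details, preferred versions only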
+
+import sys
+import optparse
+import os
+
+
+scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
+lib_path = os.path.abspath(scripts_path + '/../lib')
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+
+# For importing the following modules
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
+ sys.exit(1)
+
+import bb.cooker
+import bb.providers
+import bb.tinfoil
+
+def get_fnlist(bbhandler, pkg_pn, preferred):
+ ''' Get all recipe file names '''
+ if preferred:
+ (latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
+
+ fn_list = []
+ for pn in sorted(pkg_pn):
+ if preferred:
+ fn_list.append(preferred_versions[pn][1])
+ else:
+ fn_list.extend(pkg_pn[pn])
+
+ return fn_list
+
+def get_recipesdata(bbhandler, preferred):
+ ''' Get data of all available recipes which have PACKAGECONFIG flags '''
+ pkg_pn = bbhandler.cooker.recipecaches[''].pkg_pn
+
+ data_dict = {}
+ for fn in get_fnlist(bbhandler, pkg_pn, preferred):
+ data = bbhandler.parse_recipe_file(fn)
+ flags = data.getVarFlags("PACKAGECONFIG")
+ flags.pop('doc', None)
+ if flags:
+ data_dict[fn] = data
+
+ return data_dict
+
+def collect_pkgs(data_dict):
+ ''' Collect available pkgs in which have PACKAGECONFIG flags '''
+ # pkg_dict = {'pkg1': ['flag1', 'flag2',...]}
+ pkg_dict = {}
+ for fn in data_dict:
+ pkgconfigflags = data_dict[fn].getVarFlags("PACKAGECONFIG")
+ pkgconfigflags.pop('doc', None)
+ pkgname = data_dict[fn].getVar("P")
+ pkg_dict[pkgname] = sorted(pkgconfigflags.keys())
+
+ return pkg_dict
+
+def collect_flags(pkg_dict):
+ ''' Collect available PACKAGECONFIG flags and all affected pkgs '''
+ # flag_dict = {'flag': ['pkg1', 'pkg2',...]}
+ flag_dict = {}
+ for pkgname, flaglist in pkg_dict.items():
+ for flag in flaglist:
+ if flag in flag_dict:
+ flag_dict[flag].append(pkgname)
+ else:
+ flag_dict[flag] = [pkgname]
+
+ return flag_dict
+
+def display_pkgs(pkg_dict):
+ ''' Display available pkgs which have PACKAGECONFIG flags '''
+ pkgname_len = len("RECIPE NAME") + 1
+ for pkgname in pkg_dict:
+ if pkgname_len < len(pkgname):
+ pkgname_len = len(pkgname)
+ pkgname_len += 1
+
+ header = '%-*s%s' % (pkgname_len, str("RECIPE NAME"), str("PACKAGECONFIG FLAGS"))
+ print(header)
+ print(str("").ljust(len(header), '='))
+ for pkgname in sorted(pkg_dict):
+ print('%-*s%s' % (pkgname_len, pkgname, ' '.join(pkg_dict[pkgname])))
+
+
+def display_flags(flag_dict):
+ ''' Display available PACKAGECONFIG flags and all affected pkgs '''
+ flag_len = len("PACKAGECONFIG FLAG") + 5
+
+ header = '%-*s%s' % (flag_len, str("PACKAGECONFIG FLAG"), str("RECIPE NAMES"))
+ print(header)
+ print(str("").ljust(len(header), '='))
+
+ for flag in sorted(flag_dict):
+ print('%-*s%s' % (flag_len, flag, ' '.join(sorted(flag_dict[flag]))))
+
+def display_all(data_dict):
+ ''' Display all pkgs and PACKAGECONFIG information '''
+ print(str("").ljust(50, '='))
+ for fn in data_dict:
+ print('%s' % data_dict[fn].getVar("P"))
+ print(fn)
+ packageconfig = data_dict[fn].getVar("PACKAGECONFIG") or ''
+ if packageconfig.strip() == '':
+ packageconfig = 'None'
+ print('PACKAGECONFIG %s' % packageconfig)
+
+ for flag,flag_val in data_dict[fn].getVarFlags("PACKAGECONFIG").items():
+ if flag == "doc":
+ continue
+ print('PACKAGECONFIG[%s] %s' % (flag, flag_val))
+ print('')
+
+def main():
+ pkg_dict = {}
+ flag_dict = {}
+
+ # Collect and validate input
+ parser = optparse.OptionParser(
+ description = "Lists recipes and PACKAGECONFIG flags. Without -a or -f, recipes and their available PACKAGECONFIG flags are listed.",
+ usage = """
+ %prog [options]""")
+
+ parser.add_option("-f", "--flags",
+ help = "list available PACKAGECONFIG flags and affected recipes",
+ action="store_const", dest="listtype", const="flags", default="recipes")
+ parser.add_option("-a", "--all",
+ help = "list all recipes and PACKAGECONFIG information",
+ action="store_const", dest="listtype", const="all")
+ parser.add_option("-p", "--preferred-only",
+ help = "where multiple recipe versions are available, list only the preferred version",
+ action="store_true", dest="preferred", default=False)
+
+ options, args = parser.parse_args(sys.argv)
+
+ with bb.tinfoil.Tinfoil() as bbhandler:
+ bbhandler.prepare()
+ print("Gathering recipe data...")
+ data_dict = get_recipesdata(bbhandler, options.preferred)
+
+ if options.listtype == 'flags':
+ pkg_dict = collect_pkgs(data_dict)
+ flag_dict = collect_flags(pkg_dict)
+ display_flags(flag_dict)
+ elif options.listtype == 'recipes':
+ pkg_dict = collect_pkgs(data_dict)
+ display_pkgs(pkg_dict)
+ elif options.listtype == 'all':
+ display_all(data_dict)
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/contrib/mkefidisk.sh b/poky/scripts/contrib/mkefidisk.sh
new file mode 100755
index 000000000..ac4ec9c7f
--- /dev/null
+++ b/poky/scripts/contrib/mkefidisk.sh
@@ -0,0 +1,464 @@
+#!/bin/sh
+#
+# Copyright (c) 2012, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+LANG=C
+
+echo
+echo "WARNING: This script is deprecated and will be removed soon."
+echo "Please consider using wic EFI images instead."
+echo
+
+# Set to 1 to enable additional output
+DEBUG=0
+OUT="/dev/null"
+
+#
+# Defaults
+#
+# 20 Mb for the boot partition
+BOOT_SIZE=20
+# 5% for swap
+SWAP_RATIO=5
+
+# Clean up mounts and temporary files (used by die() and at normal exit)
+cleanup() {
+ debug "Syncing and unmounting devices"
+ # Unmount anything we mounted
+ unmount $ROOTFS_MNT || error "Failed to unmount $ROOTFS_MNT"
+ unmount $BOOTFS_MNT || error "Failed to unmount $BOOTFS_MNT"
+ unmount $HDDIMG_ROOTFS_MNT || error "Failed to unmount $HDDIMG_ROOTFS_MNT"
+ unmount $HDDIMG_MNT || error "Failed to unmount $HDDIMG_MNT"
+
+ # Remove the TMPDIR
+ debug "Removing temporary files"
+ if [ -d "$TMPDIR" ]; then
+ rm -rf $TMPDIR || error "Failed to remove $TMPDIR"
+ fi
+}
+
+trap 'die "Signal Received, Aborting..."' HUP INT TERM
+
+# Logging routines
+WARNINGS=0
+ERRORS=0
+CLEAR="$(tput sgr0)"
+INFO="$(tput bold)"
+RED="$(tput setaf 1)$(tput bold)"
+GREEN="$(tput setaf 2)$(tput bold)"
+YELLOW="$(tput setaf 3)$(tput bold)"
+info() {
+ echo "${INFO}$1${CLEAR}"
+}
+error() {
+ ERRORS=$((ERRORS+1))
+ echo "${RED}$1${CLEAR}"
+}
+warn() {
+ WARNINGS=$((WARNINGS+1))
+ echo "${YELLOW}$1${CLEAR}"
+}
+success() {
+ echo "${GREEN}$1${CLEAR}"
+}
+die() {
+ error "$1"
+ cleanup
+ exit 1
+}
+debug() {
+ if [ $DEBUG -eq 1 ]; then
+ echo "$1"
+ fi
+}
+
+usage() {
+ echo "Usage: $(basename $0) [-v] DEVICE HDDIMG TARGET_DEVICE"
+ echo " -v: Verbose debug"
+ echo " DEVICE: The device to write the image to, e.g. /dev/sdh"
+ echo " HDDIMG: The hddimg file to generate the efi disk from"
+ echo " TARGET_DEVICE: The device the target will boot from, e.g. /dev/mmcblk0"
+}
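+
+# Example (illustrative):
+# sudo ./mkefidisk.sh /dev/sdh core-image-minimal.hddimg /dev/mmcblk0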
+
+image_details() {
+ IMG=$1
+ info "Image details"
+ echo " image: $(stat --printf '%N\n' $IMG)"
+ echo " size: $(stat -L --printf '%s bytes\n' $IMG)"
+ echo " modified: $(stat -L --printf '%y\n' $IMG)"
+ echo " type: $(file -L -b $IMG)"
+ echo ""
+}
+
+device_details() {
+ DEV=$1
+ BLOCK_SIZE=512
+
+ info "Device details"
+ echo " device: $DEVICE"
+ if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
+ echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
+ else
+ echo " vendor: UNKOWN"
+ fi
+ if [ -f "/sys/class/block/$DEV/device/model" ]; then
+ echo " model: $(cat /sys/class/block/$DEV/device/model)"
+ else
+ echo " model: UNKNOWN"
+ fi
+ if [ -f "/sys/class/block/$DEV/size" ]; then
+ echo " size: $(($(cat /sys/class/block/$DEV/size) * $BLOCK_SIZE)) bytes"
+ else
+ echo " size: UNKNOWN"
+ fi
+ echo ""
+}
+
+unmount_device() {
+ grep -q $DEVICE /proc/mounts
+ if [ $? -eq 0 ]; then
+ warn "$DEVICE listed in /proc/mounts, attempting to unmount"
+ umount $DEVICE* 2>/dev/null
+ return $?
+ fi
+ return 0
+}
+
+unmount() {
+ if [ "$1" = "" ] ; then
+ return 0
+ fi
+ grep -q $1 /proc/mounts
+ if [ $? -eq 0 ]; then
+ debug "Unmounting $1"
+ umount $1
+ return $?
+ fi
+ return 0
+}
+
+#
+# Parse and validate arguments
+#
+if [ $# -lt 3 ] || [ $# -gt 4 ]; then
+ if [ $# -eq 1 ]; then
+ AVAILABLE_DISK=`lsblk | grep "disk" | cut -f 1 -d " "`
+ X=0
+ for disk in `echo $AVAILABLE_DISK`; do
+ mounted=`lsblk /dev/$disk | awk '{print $7}' | sed "s/MOUNTPOINT//"`
+ if [ -z "$mounted" ]; then
+ UNMOUNTED_AVAILABLES="$UNMOUNTED_AVAILABLES /dev/$disk"
+ info "$X - /dev/$disk"
+ X=`expr $X + 1`
+ fi
+ done
+ if [ $X -eq 0 ]; then
+ die "No unmounted device found."
+ fi
+ read -p "Choose unmounted device number: " DISK_NUMBER
+ X=0
+ for line in `echo $UNMOUNTED_AVAILABLES`; do
+ if [ $DISK_NUMBER -eq $X ]; then
+ DISK_TO_BE_FLASHED=$line
+ break
+ else
+ X=`expr $X + 1`
+ fi
+ done
+ if [ -z "$DISK_TO_BE_FLASHED" ]; then
+ die "Option \"$DISK_NUMBER\" is invalid. Choose a valid option"
+ else
+ if [ -z `echo $DISK_TO_BE_FLASHED | grep "mmc"` ]; then
+ TARGET_TO_BE_BOOT="/dev/sda"
+ else
+ TARGET_TO_BE_BOOT="/dev/mmcblk0"
+ fi
+ fi
+ echo ""
+ echo "Choose a name of the device that will be boot from"
+ echo -n "Recommended name is: "
+ info "$TARGET_TO_BE_BOOT"
+ read -p "Is target device okay? [y/N]: " RESPONSE
+ if [ "$RESPONSE" != "y" ]; then
+ read -p "Choose target device name: " TARGET_TO_BE_BOOT
+ fi
+ echo ""
+ if [ -z "$TARGET_TO_BE_BOOT" ]; then
+ die "Error: choose a valid target name"
+ fi
+ else
+ usage
+ exit 1
+ fi
+fi
+
+if [ "$1" = "-v" ]; then
+ DEBUG=1
+ OUT="1"
+ shift
+fi
+
+if [ -z "$AVAILABLE_DISK" ]; then
+ DEVICE=$1
+ HDDIMG=$2
+ TARGET_DEVICE=$3
+else
+ DEVICE=$DISK_TO_BE_FLASHED
+ HDDIMG=$1
+ TARGET_DEVICE=$TARGET_TO_BE_BOOT
+fi
+
+LINK=$(readlink $DEVICE)
+if [ $? -eq 0 ]; then
+ DEVICE="$LINK"
+fi
+
+if [ ! -w "$DEVICE" ]; then
+ usage
+ if [ ! -e "${DEVICE}" ] ; then
+ die "Device $DEVICE cannot be found"
+ else
+ die "Device $DEVICE is not writable (need to run under sudo?)"
+ fi
+fi
+
+if [ ! -e "$HDDIMG" ]; then
+ usage
+ die "HDDIMG $HDDIMG does not exist"
+fi
+
+#
+# Ensure the hddimg is not mounted
+#
+unmount "$HDDIMG" || die "Failed to unmount $HDDIMG"
+
+#
+# Check if any $DEVICE partitions are mounted
+#
+unmount_device || die "Failed to unmount $DEVICE"
+
+#
+# Confirm device with user
+#
+image_details $HDDIMG
+device_details $(basename $DEVICE)
+echo -n "${INFO}Prepare EFI image on $DEVICE [y/N]?${CLEAR} "
+read RESPONSE
+if [ "$RESPONSE" != "y" ]; then
+ echo "Image creation aborted"
+ exit 0
+fi
+
+
+#
+# Prepare the temporary working space
+#
+TMPDIR=$(mktemp -d mkefidisk-XXX) || die "Failed to create temporary mounting directory."
+HDDIMG_MNT=$TMPDIR/hddimg
+HDDIMG_ROOTFS_MNT=$TMPDIR/hddimg_rootfs
+ROOTFS_MNT=$TMPDIR/rootfs
+BOOTFS_MNT=$TMPDIR/bootfs
+mkdir $HDDIMG_MNT || die "Failed to create $HDDIMG_MNT"
+mkdir $HDDIMG_ROOTFS_MNT || die "Failed to create $HDDIMG_ROOTFS_MNT"
+mkdir $ROOTFS_MNT || die "Failed to create $ROOTFS_MNT"
+mkdir $BOOTFS_MNT || die "Failed to create $BOOTFS_MNT"
+
+
+#
+# Partition $DEVICE
+#
+DEVICE_SIZE=$(parted -s $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
+# If the device size is not reported there may not be a valid label
+if [ "$DEVICE_SIZE" = "" ] ; then
+ parted -s $DEVICE mklabel msdos || die "Failed to create MSDOS partition table"
+ DEVICE_SIZE=$(parted -s $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
+fi
+SWAP_SIZE=$((DEVICE_SIZE*SWAP_RATIO/100))
+ROOTFS_SIZE=$((DEVICE_SIZE-BOOT_SIZE-SWAP_SIZE))
+ROOTFS_START=$((BOOT_SIZE))
+ROOTFS_END=$((ROOTFS_START+ROOTFS_SIZE))
+SWAP_START=$((ROOTFS_END))
+
+# MMC devices use a partition prefix character 'p'
+PART_PREFIX=""
+if [ ! "${DEVICE#/dev/mmcblk}" = "${DEVICE}" ] || [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
+ PART_PREFIX="p"
+fi
+BOOTFS=$DEVICE${PART_PREFIX}1
+ROOTFS=$DEVICE${PART_PREFIX}2
+SWAP=$DEVICE${PART_PREFIX}3
+
+TARGET_PART_PREFIX=""
+if [ ! "${TARGET_DEVICE#/dev/mmcblk}" = "${TARGET_DEVICE}" ]; then
+ TARGET_PART_PREFIX="p"
+fi
+TARGET_ROOTFS=$TARGET_DEVICE${TARGET_PART_PREFIX}2
+TARGET_SWAP=$TARGET_DEVICE${TARGET_PART_PREFIX}3
+
+echo ""
+info "Boot partition size: $BOOT_SIZE MB ($BOOTFS)"
+info "ROOTFS partition size: $ROOTFS_SIZE MB ($ROOTFS)"
+info "Swap partition size: $SWAP_SIZE MB ($SWAP)"
+echo ""
+
+# Use MSDOS by default as GPT cannot be reliably distributed in disk image form
+# as it requires the backup table to be on the last block of the device, which
+# of course varies from device to device.
+
+info "Partitioning installation media ($DEVICE)"
+
+debug "Deleting partition table on $DEVICE"
+dd if=/dev/zero of=$DEVICE bs=512 count=2 >$OUT 2>&1 || die "Failed to zero beginning of $DEVICE"
+
+debug "Creating new partition table (MSDOS) on $DEVICE"
+parted -s $DEVICE mklabel msdos >$OUT 2>&1 || die "Failed to create MSDOS partition table"
+
+debug "Creating boot partition on $BOOTFS"
+parted -s $DEVICE mkpart primary 0% $BOOT_SIZE >$OUT 2>&1 || die "Failed to create BOOT partition"
+
+debug "Enabling boot flag on $BOOTFS"
+parted -s $DEVICE set 1 boot on >$OUT 2>&1 || die "Failed to enable boot flag"
+
+debug "Creating ROOTFS partition on $ROOTFS"
+parted -s $DEVICE mkpart primary $ROOTFS_START $ROOTFS_END >$OUT 2>&1 || die "Failed to create ROOTFS partition"
+
+debug "Creating swap partition on $SWAP"
+parted -s $DEVICE mkpart primary $SWAP_START 100% >$OUT 2>&1 || die "Failed to create SWAP partition"
+
+if [ $DEBUG -eq 1 ]; then
+ parted -s $DEVICE print
+fi
+
+
+#
+# Check if any $DEVICE partitions are mounted after partitioning
+#
+unmount_device || die "Failed to unmount $DEVICE partitions"
+
+
+#
+# Format $DEVICE partitions
+#
+info "Formatting partitions"
+debug "Formatting $BOOTFS as vfat"
+if [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
+ mkfs.vfat -I $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
+else
+ mkfs.vfat $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
+fi
+
+debug "Formatting $ROOTFS as ext3"
+mkfs.ext3 -F $ROOTFS -L "ROOT" >$OUT 2>&1 || die "Failed to format $ROOTFS"
+
+debug "Formatting swap partition ($SWAP)"
+mkswap $SWAP >$OUT 2>&1 || die "Failed to prepare swap"
+
+
+#
+# Installing to $DEVICE
+#
+debug "Mounting images and device in preparation for installation"
+mount -o ro,loop $HDDIMG $HDDIMG_MNT >$OUT 2>&1 || error "Failed to mount $HDDIMG"
+mount -o ro,loop $HDDIMG_MNT/rootfs.img $HDDIMG_ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount rootfs.img"
+mount $ROOTFS $ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount $ROOTFS on $ROOTFS_MNT"
+mount $BOOTFS $BOOTFS_MNT >$OUT 2>&1 || error "Failed to mount $BOOTFS on $BOOTFS_MNT"
+
+info "Preparing boot partition"
+EFIDIR="$BOOTFS_MNT/EFI/BOOT"
+cp $HDDIMG_MNT/vmlinuz $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy vmlinuz"
+# Copy the efi loader and configs (booti*.efi and grub.cfg if it exists)
+cp -r $HDDIMG_MNT/EFI $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy EFI dir"
+# Silently ignore a missing systemd-boot loader dir (we might just be a GRUB image)
+cp -r $HDDIMG_MNT/loader $BOOTFS_MNT >$OUT 2>&1
+
+# Update the boot loaders configurations for an installed image
+# Remove any existing root= kernel parameters and:
+# o Add a root= parameter with the target rootfs
+# o Specify ro so fsck can be run during boot
+# o Specify rootwait in case the target media is an asynchronous block device
+# such as MMC or USB disks
+# o Specify "quiet" to minimize boot time when using slow serial consoles
+
+# Look for a GRUB installation
+GRUB_CFG="$EFIDIR/grub.cfg"
+if [ -e "$GRUB_CFG" ]; then
+ info "Configuring GRUB"
+ # Delete the install entry
+ sed -i "/menuentry 'install'/,/^}/d" $GRUB_CFG
+ # Delete the initrd lines
+ sed -i "/initrd /d" $GRUB_CFG
+ # Delete any LABEL= strings
+ sed -i "s/ LABEL=[^ ]*/ /" $GRUB_CFG
+
+ sed -i "s@ root=[^ ]*@ @" $GRUB_CFG
+ sed -i "s@vmlinuz @vmlinuz root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $GRUB_CFG
+fi
+
+# Look for a systemd-boot installation
+SYSTEMD_BOOT_ENTRIES="$BOOTFS_MNT/loader/entries"
+SYSTEMD_BOOT_CFG="$SYSTEMD_BOOT_ENTRIES/boot.conf"
+if [ -d "$SYSTEMD_BOOT_ENTRIES" ]; then
+ info "Configuring SystemD-boot"
+ # remove the install target if it exists
+ rm $SYSTEMD_BOOT_ENTRIES/install.conf >$OUT 2>&1
+
+ if [ ! -e "$SYSTEMD_BOOT_CFG" ]; then
+ echo "ERROR: $SYSTEMD_BOOT_CFG not found"
+ fi
+
+ sed -i "/initrd /d" $SYSTEMD_BOOT_CFG
+ sed -i "s@ root=[^ ]*@ @" $SYSTEMD_BOOT_CFG
+ sed -i "s@options *LABEL=boot @options LABEL=Boot root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $SYSTEMD_BOOT_CFG
+fi
+
+# Ensure we have at least one EFI bootloader configured
+if [ ! -e $GRUB_CFG ] && [ ! -e $SYSTEMD_BOOT_CFG ]; then
+ die "No EFI bootloader configuration found"
+fi
+
+
+info "Copying ROOTFS files (this may take a while)"
+cp -a $HDDIMG_ROOTFS_MNT/* $ROOTFS_MNT >$OUT 2>&1 || die "Root FS copy failed"
+
+echo "$TARGET_SWAP swap swap defaults 0 0" >> $ROOTFS_MNT/etc/fstab
+
+# We don't want udev to mount our root device while we're booting...
+if [ -d $ROOTFS_MNT/etc/udev/ ] ; then
+ echo "$TARGET_DEVICE" >> $ROOTFS_MNT/etc/udev/mount.blacklist
+fi
+
+# Add startup.nsh script for automated boot
+printf "fs0:\%s\BOOT\%s\n" "EFI" "bootx64.efi" > $BOOTFS_MNT/startup.nsh
+
+
+# Call cleanup to unmount devices and images and remove the TMPDIR
+cleanup
+
+echo ""
+if [ $WARNINGS -ne 0 ] && [ $ERRORS -eq 0 ]; then
+ echo "${YELLOW}Installation completed with warnings${CLEAR}"
+ echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
+elif [ $ERRORS -ne 0 ]; then
+ echo "${RED}Installation encountered errors${CLEAR}"
+ echo "${RED}Errors: $ERRORS${CLEAR}"
+ echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
+else
+ success "Installation completed successfully"
+fi
+echo ""
diff --git a/poky/scripts/contrib/oe-build-perf-report-email.py b/poky/scripts/contrib/oe-build-perf-report-email.py
new file mode 100755
index 000000000..913847bbe
--- /dev/null
+++ b/poky/scripts/contrib/oe-build-perf-report-email.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python3
+#
+# Send build performance test report emails
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import argparse
+import base64
+import logging
+import os
+import pwd
+import re
+import shutil
+import smtplib
+import socket
+import subprocess
+import sys
+import tempfile
+from email.mime.image import MIMEImage
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger('oe-build-perf-report')
+
+
+# Find the js scraper script
+SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf',
+ 'scrape-html-report.js')
+if not os.path.isfile(SCRAPE_JS):
+ log.error("Unableto find oe-build-perf-report-scrape.js")
+ sys.exit(1)
+
+
+class ReportError(Exception):
+ """Local errors"""
+ pass
+
+
+def check_utils():
+ """Check that all needed utils are installed in the system"""
+ missing = []
+ for cmd in ('phantomjs', 'optipng'):
+ if not shutil.which(cmd):
+ missing.append(cmd)
+ if missing:
+ log.error("The following tools are missing: %s", ' '.join(missing))
+ sys.exit(1)
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ description = """Email build perf test report"""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description=description)
+
+ parser.add_argument('--debug', '-d', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--quiet', '-q', action='store_true',
+ help="Only print errors")
+ parser.add_argument('--to', action='append',
+ help="Recipients of the email")
+ parser.add_argument('--cc', action='append',
+ help="Carbon copy recipients of the email")
+ parser.add_argument('--bcc', action='append',
+ help="Blind carbon copy recipients of the email")
+ parser.add_argument('--subject', default="Yocto build perf test report",
+ help="Email subject")
+ parser.add_argument('--outdir', '-o',
+ help="Store files in OUTDIR. Can be used to preserve "
+ "the email parts")
+ parser.add_argument('--text',
+ help="Plain text message")
+ parser.add_argument('--html',
+ help="HTML peport generated by oe-build-perf-report")
+ parser.add_argument('--phantomjs-args', action='append',
+ help="Extra command line arguments passed to PhantomJS")
+
+ args = parser.parse_args(argv)
+
+ if not args.html and not args.text:
+ parser.error("Please specify --html and/or --text")
+
+ return args
+
+
+def decode_png(infile, outfile):
+ """Parse/decode/optimize png data from a html element"""
+ with open(infile) as f:
+ raw_data = f.read()
+
+ # Grab raw base64 data
+ b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1)
+ b64_data = re.sub('">.+$', '', b64_data, 1)
+
+ # Replace file with proper decoded png
+ with open(outfile, 'wb') as f:
+ f.write(base64.b64decode(b64_data))
+
+ subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT)
+
+
+def mangle_html_report(infile, outfile, pngs):
+ """Mangle html file into a email compatible format"""
+ paste = True
+ png_dir = os.path.dirname(outfile)
+ with open(infile) as f_in:
+ with open(outfile, 'w') as f_out:
+ for line in f_in.readlines():
+ stripped = line.strip()
+ # Strip out scripts
+ if stripped == '<!--START-OF-SCRIPTS-->':
+ paste = False
+ elif stripped == '<!--END-OF-SCRIPTS-->':
+ paste = True
+ elif paste:
+ if re.match('^.+href="data:image/png;base64', stripped):
+ # Strip out encoded pngs (as they're huge in size)
+ continue
+ elif 'www.gstatic.com' in stripped:
+ # HACK: drop references to external static pages
+ continue
+
+ # Replace charts with <img> elements
+                match = re.match(r'<div id="(?P<id>\w+)"', stripped)
+ if match and match.group('id') in pngs:
+ f_out.write('<img src="cid:{}"\n'.format(match.group('id')))
+ else:
+ f_out.write(line)
+
+
+def scrape_html_report(report, outdir, phantomjs_extra_args=None):
+ """Scrape html report into a format sendable by email"""
+ tmpdir = tempfile.mkdtemp(dir='.')
+ log.debug("Using tmpdir %s for phantomjs output", tmpdir)
+
+ if not os.path.isdir(outdir):
+ os.mkdir(outdir)
+ if os.path.splitext(report)[1] not in ('.html', '.htm'):
+ raise ReportError("Invalid file extension for report, needs to be "
+ "'.html' or '.htm'")
+
+ try:
+ log.info("Scraping HTML report with PhangomJS")
+ extra_args = phantomjs_extra_args if phantomjs_extra_args else []
+ subprocess.check_output(['phantomjs', '--debug=true'] + extra_args +
+ [SCRAPE_JS, report, tmpdir],
+ stderr=subprocess.STDOUT)
+
+ pngs = []
+ images = []
+ for fname in os.listdir(tmpdir):
+ base, ext = os.path.splitext(fname)
+ if ext == '.png':
+ log.debug("Decoding %s", fname)
+ decode_png(os.path.join(tmpdir, fname),
+ os.path.join(outdir, fname))
+ pngs.append(base)
+ images.append(fname)
+ elif ext in ('.html', '.htm'):
+ report_file = fname
+ else:
+ log.warning("Unknown file extension: '%s'", ext)
+ #shutil.move(os.path.join(tmpdir, fname), outdir)
+
+ log.debug("Mangling html report file %s", report_file)
+ mangle_html_report(os.path.join(tmpdir, report_file),
+ os.path.join(outdir, report_file), pngs)
+ return (os.path.join(outdir, report_file),
+ [os.path.join(outdir, i) for i in images])
+ finally:
+ shutil.rmtree(tmpdir)
+
+def send_email(text_fn, html_fn, image_fns, subject, recipients, copy=[],
+ blind_copy=[]):
+ """Send email"""
+ # Generate email message
+ text_msg = html_msg = None
+ if text_fn:
+ with open(text_fn) as f:
+ text_msg = MIMEText("Yocto build performance test report.\n" +
+ f.read(), 'plain')
+ if html_fn:
+ html_msg = msg = MIMEMultipart('related')
+ with open(html_fn) as f:
+ html_msg.attach(MIMEText(f.read(), 'html'))
+ for img_fn in image_fns:
+            # Expect that the content id is the same as the filename
+ cid = os.path.splitext(os.path.basename(img_fn))[0]
+ with open(img_fn, 'rb') as f:
+ image_msg = MIMEImage(f.read())
+ image_msg['Content-ID'] = '<{}>'.format(cid)
+ html_msg.attach(image_msg)
+
+ if text_msg and html_msg:
+ msg = MIMEMultipart('alternative')
+ msg.attach(text_msg)
+ msg.attach(html_msg)
+ elif text_msg:
+ msg = text_msg
+ elif html_msg:
+ msg = html_msg
+ else:
+ raise ReportError("Neither plain text nor html body specified")
+
+ pw_data = pwd.getpwuid(os.getuid())
+ full_name = pw_data.pw_gecos.split(',')[0]
+ email = os.environ.get('EMAIL',
+ '{}@{}'.format(pw_data.pw_name, socket.getfqdn()))
+ msg['From'] = "{} <{}>".format(full_name, email)
+ msg['To'] = ', '.join(recipients)
+ if copy:
+ msg['Cc'] = ', '.join(copy)
+ if blind_copy:
+ msg['Bcc'] = ', '.join(blind_copy)
+ msg['Subject'] = subject
+
+ # Send email
+ with smtplib.SMTP('localhost') as smtp:
+ smtp.send_message(msg)
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+ if args.quiet:
+ log.setLevel(logging.ERROR)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ check_utils()
+
+ if args.outdir:
+ outdir = args.outdir
+ if not os.path.exists(outdir):
+ os.mkdir(outdir)
+ else:
+ outdir = tempfile.mkdtemp(dir='.')
+
+ try:
+ log.debug("Storing email parts in %s", outdir)
+ html_report = images = None
+ if args.html:
+ html_report, images = scrape_html_report(args.html, outdir,
+ args.phantomjs_args)
+
+ if args.to:
+ log.info("Sending email to %s", ', '.join(args.to))
+ if args.cc:
+ log.info("Copying to %s", ', '.join(args.cc))
+ if args.bcc:
+ log.info("Blind copying to %s", ', '.join(args.bcc))
+ send_email(args.text, html_report, images, args.subject,
+ args.to, args.cc, args.bcc)
+ except subprocess.CalledProcessError as err:
+ log.error("%s, with output:\n%s", str(err), err.output.decode())
+ return 1
+ except ReportError as err:
+ log.error(err)
+ return 1
+ finally:
+ if not args.outdir:
+ log.debug("Wiping %s", outdir)
+ shutil.rmtree(outdir)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/poky/scripts/contrib/patchreview.py b/poky/scripts/contrib/patchreview.py
new file mode 100755
index 000000000..4e3e73c7a
--- /dev/null
+++ b/poky/scripts/contrib/patchreview.py
@@ -0,0 +1,211 @@
+#! /usr/bin/env python3
+
+# TODO
+# - option to just list all broken files
+# - test suite
+# - validate signed-off-by
+
+
+class Result:
+ # Whether the patch has an Upstream-Status or not
+ missing_upstream_status = False
+ # If the Upstream-Status tag is malformed in some way (string for bad bit)
+ malformed_upstream_status = None
+ # If the Upstream-Status value is unknown (boolean)
+ unknown_upstream_status = False
+ # The upstream status value (Pending, etc)
+ upstream_status = None
+ # Whether the patch has a Signed-off-by or not
+ missing_sob = False
+ # Whether the Signed-off-by tag is malformed in some way
+ malformed_sob = False
+ # The Signed-off-by tag value
+ sob = None
+ # Whether a patch looks like a CVE but doesn't have a CVE tag
+ missing_cve = False
+
+def blame_patch(patch):
+ """
+ From a patch filename, return a list of "commit summary (author name <author
+ email>)" strings representing the history.
+ """
+ import subprocess
+ return subprocess.check_output(("git", "log",
+ "--follow", "--find-renames", "--diff-filter=A",
+ "--format=%s (%aN <%aE>)",
+ "--", patch)).decode("utf-8").splitlines()
+
+def patchreview(patches):
+ import re
+
+ # General pattern: start of line, optional whitespace, tag with optional
+ # hyphen or spaces, maybe a colon, some whitespace, then the value, all case
+ # insensitive.
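+    # e.g. "Signed-off-by: A <a@b>", "signed_off_by A <a@b>" and
+    # "Upstream-Status Backport" all match; malformed variants are flagged
+    # separately below.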
+ sob_re = re.compile(r"^[\t ]*(Signed[-_ ]off[-_ ]by:?)[\t ]*(.+)", re.IGNORECASE | re.MULTILINE)
+ status_re = re.compile(r"^[\t ]*(Upstream[-_ ]Status:?)[\t ]*(\w*)", re.IGNORECASE | re.MULTILINE)
+ status_values = ("accepted", "pending", "inappropriate", "backport", "submitted", "denied")
+ cve_tag_re = re.compile(r"^[\t ]*(CVE:)[\t ]*(.*)", re.IGNORECASE | re.MULTILINE)
+ cve_re = re.compile(r"cve-[0-9]{4}-[0-9]{4,6}", re.IGNORECASE)
+
+ results = {}
+
+ for patch in patches:
+ result = Result()
+ results[patch] = result
+
+ content = open(patch, encoding='ascii', errors='ignore').read()
+
+ # Find the Signed-off-by tag
+ match = sob_re.search(content)
+ if match:
+ value = match.group(1)
+ if value != "Signed-off-by:":
+ result.malformed_sob = value
+ result.sob = match.group(2)
+ else:
+ result.missing_sob = True
+
+
+ # Find the Upstream-Status tag
+ match = status_re.search(content)
+ if match:
+ value = match.group(1)
+ if value != "Upstream-Status:":
+ result.malformed_upstream_status = value
+
+ value = match.group(2).lower()
+ # TODO: check case
+ if value not in status_values:
+ result.unknown_upstream_status = True
+ result.upstream_status = value
+ else:
+ result.missing_upstream_status = True
+
+        # Check that patches which look like CVEs have CVE tags
+ if cve_re.search(patch) or cve_re.search(content):
+ if not cve_tag_re.search(content):
+ result.missing_cve = True
+ # TODO: extract CVE list
+
+ return results
+
+
+def analyse(results, want_blame=False, verbose=True):
+ """
+ want_blame: display blame data for each malformed patch
+ verbose: display per-file results instead of just summary
+ """
+
+ # want_blame requires verbose, so disable blame if we're not verbose
+ if want_blame and not verbose:
+ want_blame = False
+
+ total_patches = 0
+ missing_sob = 0
+ malformed_sob = 0
+ missing_status = 0
+ malformed_status = 0
+ missing_cve = 0
+ pending_patches = 0
+
+ for patch in sorted(results):
+ r = results[patch]
+ total_patches += 1
+ need_blame = False
+
+ # Build statistics
+ if r.missing_sob:
+ missing_sob += 1
+ if r.malformed_sob:
+ malformed_sob += 1
+ if r.missing_upstream_status:
+ missing_status += 1
+ if r.malformed_upstream_status or r.unknown_upstream_status:
+ malformed_status += 1
+ if r.missing_cve:
+ missing_cve += 1
+ if r.upstream_status == "pending":
+ pending_patches += 1
+
+ # Output warnings
+ if r.missing_sob:
+ need_blame = True
+ if verbose:
+ print("Missing Signed-off-by tag (%s)" % patch)
+ # TODO: disable this for now as too much fails
+ if False and r.malformed_sob:
+ need_blame = True
+ if verbose:
+ print("Malformed Signed-off-by '%s' (%s)" % (r.malformed_sob, patch))
+ if r.missing_cve:
+ need_blame = True
+ if verbose:
+ print("Missing CVE tag (%s)" % patch)
+ if r.missing_upstream_status:
+ need_blame = True
+ if verbose:
+ print("Missing Upstream-Status tag (%s)" % patch)
+ if r.malformed_upstream_status:
+ need_blame = True
+ if verbose:
+ print("Malformed Upstream-Status '%s' (%s)" % (r.malformed_upstream_status, patch))
+ if r.unknown_upstream_status:
+ need_blame = True
+ if verbose:
+ print("Unknown Upstream-Status value '%s' (%s)" % (r.upstream_status, patch))
+
+ if want_blame and need_blame:
+ print("\n".join(blame_patch(patch)) + "\n")
+
+ def percent(num):
+ try:
+ return "%d (%d%%)" % (num, round(num * 100.0 / total_patches))
+ except ZeroDivisionError:
+ return "N/A"
+
+ if verbose:
+ print()
+
+ print("""Total patches found: %d
+Patches missing Signed-off-by: %s
+Patches with malformed Signed-off-by: %s
+Patches missing CVE: %s
+Patches missing Upstream-Status: %s
+Patches with malformed Upstream-Status: %s
+Patches in Pending state: %s""" % (total_patches,
+ percent(missing_sob),
+ percent(malformed_sob),
+ percent(missing_cve),
+ percent(missing_status),
+ percent(malformed_status),
+ percent(pending_patches)))
+
+
+
+def histogram(results):
+ from toolz import recipes, dicttoolz
+ import math
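+    # NOTE: countby/valmap come from the third-party "toolz" package,
+    # which must be installed separately (e.g. pip install toolz).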
+ counts = recipes.countby(lambda r: r.upstream_status, results.values())
+ bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
+ for k in bars:
+ print("%-20s %s (%d)" % (k.capitalize() if k else "No status", bars[k], counts[k]))
+
+
+if __name__ == "__main__":
+ import argparse, subprocess, os
+
+ args = argparse.ArgumentParser(description="Patch Review Tool")
+ args.add_argument("-b", "--blame", action="store_true", help="show blame for malformed patches")
+ args.add_argument("-v", "--verbose", action="store_true", help="show per-patch results")
+ args.add_argument("-g", "--histogram", action="store_true", help="show patch histogram")
+ args.add_argument("directory", nargs="?", help="directory to scan")
+ args = args.parse_args()
+
+ if args.directory:
+ os.chdir(args.directory)
+ patches = subprocess.check_output(("git", "ls-files", "*.patch", "*.diff")).decode("utf-8").split()
+ results = patchreview(patches)
+ analyse(results, want_blame=args.blame, verbose=args.verbose)
+ if args.histogram:
+ print()
+ histogram(results)
diff --git a/poky/scripts/contrib/patchtest.sh b/poky/scripts/contrib/patchtest.sh
new file mode 100755
index 000000000..7fe566666
--- /dev/null
+++ b/poky/scripts/contrib/patchtest.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# patchtest: Run patchtest on commits starting at master
+#
+# Copyright (c) 2017, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+set -o errexit
+
+# Default values
+pokydir=''
+
+usage() {
+CMD=$(basename $0)
+cat <<EOM >&2
+Usage: $CMD [-h] [-p pokydir]
+ -p pokydir Defaults to current directory
+EOM
+
+ exit 1
+}
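+
+# Example invocation (hypothetical path): run patchtest using the poky
+# checkout in ~/poky:
+#   ./scripts/contrib/patchtest.sh -p ~/poky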
+
+function clone() {
+ local REPOREMOTE=$1
+ local REPODIR=$2
+ if [ ! -d $REPODIR ]; then
+ git clone $REPOREMOTE $REPODIR --quiet
+ else
+ ( cd $REPODIR; git pull --quiet )
+ fi
+}
+
+while getopts ":p:h" opt; do
+ case $opt in
+ p)
+ pokydir=$OPTARG
+ ;;
+ h)
+ usage
+ ;;
+ \?)
+ echo "Invalid option: -$OPTARG" >&2
+ usage
+ ;;
+ :)
+ echo "Option -$OPTARG requires an argument." >&2
+ usage
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+CDIR="$PWD"
+
+# default pokydir to current directory if user did not specify one
+if [ -z "$pokydir" ]; then
+ pokydir="$CDIR"
+fi
+
+PTENV="$PWD/patchtest"
+PT="$PTENV/patchtest"
+PTOE="$PTENV/patchtest-oe"
+
+if ! which virtualenv > /dev/null; then
+ echo "Install virtualenv before proceeding"
+ exit 1;
+fi
+
+# activate the virtual env
+virtualenv $PTENV --quiet
+source $PTENV/bin/activate
+
+cd $PTENV
+
+# clone or pull
+clone git://git.yoctoproject.org/patchtest $PT
+clone git://git.yoctoproject.org/patchtest-oe $PTOE
+
+# install requirements
+pip install -r $PT/requirements.txt --quiet
+pip install -r $PTOE/requirements.txt --quiet
+
+PATH="$PT:$PT/scripts:$PATH"
+
+# loop through parent to HEAD and execute patchtest on each commit
+for commit in $(git rev-list master..HEAD --reverse)
+do
+ shortlog="$(git log "$commit^1..$commit" --pretty='%h: %aN: %cd: %s')"
+ log="$(git format-patch "$commit^1..$commit" --stdout | patchtest - -r $pokydir -s $PTOE/tests --base-commit $commit^1 --json 2>/dev/null | create-summary --fail --only-results)"
+ if [ -z "$log" ]; then
+ shortlog="$shortlog: OK"
+ else
+ shortlog="$shortlog: FAIL"
+ fi
+ echo "$shortlog"
+ echo "$log" | sed -n -e '/Issue/p' -e '/Suggested fix/p'
+ echo ""
+done
+
+deactivate
+
+cd $CDIR
diff --git a/poky/scripts/contrib/serdevtry b/poky/scripts/contrib/serdevtry
new file mode 100755
index 000000000..74bd7b716
--- /dev/null
+++ b/poky/scripts/contrib/serdevtry
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+# Copyright (C) 2014 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+if [ "$1" = "" -o "$1" = "--help" ] ; then
+ echo "Usage: $0 <serial terminal command>"
+ echo
+ echo "Simple script to handle maintaining a terminal for serial devices that"
+ echo "disappear when a device is powered down or reset, such as the USB"
+ echo "serial console on the original BeagleBone (white version)."
+ echo
+ echo "e.g. $0 picocom -b 115200 /dev/ttyUSB0"
+ echo
+ exit
+fi
+
+args="$@"
+DEVICE=""
+while [ "$1" != "" ]; do
+ case "$1" in
+ /dev/*)
+ DEVICE=$1
+ break;;
+ esac
+ shift
+done
+
+if [ "$DEVICE" != "" ] ; then
+ while true; do
+ if [ ! -e $DEVICE ] ; then
+ echo "serdevtry: waiting for $DEVICE to exist..."
+ while [ ! -e $DEVICE ]; do
+ sleep 0.1
+ done
+ fi
+ if [ ! -w $DEVICE ] ; then
+ # Sometimes (presumably because of a race with udev) we get to
+ # the device before its permissions have been set up
+ RETRYNUM=0
+ while [ ! -w $DEVICE ]; do
+ if [ "$RETRYNUM" = "2" ] ; then
+ echo "Device $DEVICE exists but is not writable!"
+ exit 1
+ fi
+ RETRYNUM=$((RETRYNUM+1))
+ sleep 0.1
+ done
+ fi
+ $args
+ if [ -e $DEVICE ] ; then
+ break
+ fi
+ done
+else
+ echo "Unable to determine device node from command: $args"
+ exit 1
+fi
+
diff --git a/poky/scripts/contrib/test_build_time.sh b/poky/scripts/contrib/test_build_time.sh
new file mode 100755
index 000000000..9e5725ae5
--- /dev/null
+++ b/poky/scripts/contrib/test_build_time.sh
@@ -0,0 +1,237 @@
+#!/bin/bash
+
+# Build performance regression test script
+#
+# Copyright 2011 Intel Corporation
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# DESCRIPTION
+# This script is intended to be used in conjunction with "git bisect run"
+# in order to find regressions in build time, however it can also be used
+# independently. It cleans out the build output directories, runs a
+# specified worker script (an example is test_build_time_worker.sh) under
+# TIME(1), logs the results to TEST_LOGDIR (default /tmp) and returns a
+# value telling "git bisect run" whether the build time is good (under
+# the specified threshold) or bad (over it). There is also a tolerance
+# option but it is not particularly useful as it only subtracts the
+# tolerance from the given threshold and uses it as the actual threshold.
+#
+# It is also capable of taking a file listing git revision hashes to be
+# test-applied to the repository in order to get past build failures that
+# would otherwise cause certain revisions to have to be skipped; if a
+# revision does not apply cleanly then the script assumes it does not
+# need to be applied and ignores it.
+#
+# Please see the help output (syntax below) for some important setup
+# instructions.
+#
+# AUTHORS
+# Paul Eggleton <paul.eggleton@linux.intel.com>
+
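+# EXAMPLE
+#   A hypothetical bisect run with a 30 minute threshold and 2 minute
+#   tolerance, using the example worker script:
+#     TEST_BUILDDIR=~/poky/build git bisect run \
+#         ./scripts/contrib/test_build_time.sh ./test_build_time_worker.sh 30m 2m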
+
+syntax() {
+ echo "syntax: $0 <script> <time> <tolerance> [patchrevlist]"
+ echo ""
+ echo " script - worker script file (if in current dir, prefix with ./)"
+ echo " time - time threshold (in seconds, suffix m for minutes)"
+ echo " tolerance - tolerance (in seconds, suffix m for minutes or % for"
+ echo " percentage, can be 0)"
+ echo " patchrevlist - optional file listing revisions to apply as patches on top"
+ echo ""
+ echo "You must set TEST_BUILDDIR to point to a previously created build directory,"
+ echo "however please note that this script will wipe out the TMPDIR defined in"
+ echo "TEST_BUILDDIR/conf/local.conf as part of its initial setup (as well as your"
+ echo "~/.ccache)"
+ echo ""
+ echo "To get rid of the sudo prompt, please add the following line to /etc/sudoers"
+ echo "(use 'visudo' to edit this; also it is assumed that the user you are running"
+ echo "as is a member of the 'wheel' group):"
+ echo ""
+ echo "%wheel ALL=(ALL) NOPASSWD: /sbin/sysctl -w vm.drop_caches=[1-3]"
+ echo ""
+ echo "Note: it is recommended that you disable crond and any other process that"
+ echo "may cause significant CPU or I/O usage during build performance tests."
+}
+
+# Note - we exit with 250 here because that will tell git bisect run that
+# something bad happened and stop
+if [ "$1" = "" ] ; then
+ syntax
+ exit 250
+fi
+
+if [ "$2" = "" ] ; then
+ syntax
+ exit 250
+fi
+
+if [ "$3" = "" ] ; then
+ syntax
+ exit 250
+fi
+
+if ! [[ "$2" =~ ^[0-9][0-9m.]*$ ]] ; then
+ echo "'$2' is not a valid number for threshold"
+ exit 250
+fi
+
+if ! [[ "$3" =~ ^[0-9][0-9m.%]*$ ]] ; then
+ echo "'$3' is not a valid number for tolerance"
+ exit 250
+fi
+
+if [ "$TEST_BUILDDIR" = "" ] ; then
+ echo "Please set TEST_BUILDDIR to a previously created build directory"
+ exit 250
+fi
+
+if [ ! -d "$TEST_BUILDDIR" ] ; then
+ echo "TEST_BUILDDIR $TEST_BUILDDIR not found"
+ exit 250
+fi
+
+git diff --quiet
+if [ $? != 0 ] ; then
+ echo "Working tree is dirty, cannot proceed"
+ exit 251
+fi
+
+if [ "$BB_ENV_EXTRAWHITE" != "" ] ; then
+ echo "WARNING: you are running after sourcing the build environment script, this is not recommended"
+fi
+
+runscript=$1
+timethreshold=$2
+tolerance=$3
+
+if [ "$4" != "" ] ; then
+ patchrevlist=`cat $4`
+else
+ patchrevlist=""
+fi
+
+if [[ $timethreshold == *m* ]] ; then
+ timethreshold=`echo $timethreshold | sed s/m/*60/ | bc`
+fi
+
+if [[ $tolerance == *m* ]] ; then
+ tolerance=`echo $tolerance | sed s/m/*60/ | bc`
+elif [[ $tolerance == *%* ]] ; then
+ tolerance=`echo $tolerance | sed s/%//`
+ tolerance=`echo "scale = 2; (($tolerance * $timethreshold) / 100)" | bc`
+fi
+
+tmpdir=`grep "^TMPDIR" $TEST_BUILDDIR/conf/local.conf | sed -e 's/TMPDIR[ \t]*=[ \t\?]*"//' -e 's/"//'`
+if [ "x$tmpdir" = "x" ]; then
+ echo "Unable to determine TMPDIR from $TEST_BUILDDIR/conf/local.conf, bailing out"
+ exit 250
+fi
+sstatedir=`grep "^SSTATE_DIR" $TEST_BUILDDIR/conf/local.conf | sed -e 's/SSTATE_DIR[ \t\?]*=[ \t]*"//' -e 's/"//'`
+if [ "x$sstatedir" = "x" ]; then
+ echo "Unable to determine SSTATE_DIR from $TEST_BUILDDIR/conf/local.conf, bailing out"
+ exit 250
+fi
+
+if [ `expr length $tmpdir` -lt 4 ] ; then
+ echo "TMPDIR $tmpdir is less than 4 characters, bailing out"
+ exit 250
+fi
+
+if [ `expr length $sstatedir` -lt 4 ] ; then
+ echo "SSTATE_DIR $sstatedir is less than 4 characters, bailing out"
+ exit 250
+fi
+
+echo -n "About to wipe out TMPDIR $tmpdir, press Ctrl+C to break out... "
+for i in 9 8 7 6 5 4 3 2 1
+do
+ echo -ne "\x08$i"
+ sleep 1
+done
+echo
+
+pushd . > /dev/null
+
+rm -f pseudodone
+echo "Removing TMPDIR $tmpdir..."
+rm -rf $tmpdir
+echo "Removing TMPDIR $tmpdir-*libc..."
+rm -rf $tmpdir-*libc
+echo "Removing SSTATE_DIR $sstatedir..."
+rm -rf $sstatedir
+echo "Removing ~/.ccache..."
+rm -rf ~/.ccache
+
+echo "Syncing..."
+sync
+sync
+echo "Dropping VM cache..."
+#echo 3 > /proc/sys/vm/drop_caches
+sudo /sbin/sysctl -w vm.drop_caches=3 > /dev/null
+
+if [ "$TEST_LOGDIR" = "" ] ; then
+ logdir="/tmp"
+else
+ logdir="$TEST_LOGDIR"
+fi
+rev=`git rev-parse HEAD`
+logfile="$logdir/timelog_$rev.log"
+echo -n > $logfile
+
+gitroot=`git rev-parse --show-toplevel`
+cd $gitroot
+for patchrev in $patchrevlist ; do
+ echo "Applying $patchrev"
+ patchfile=`mktemp`
+ git show $patchrev > $patchfile
+ git apply --check $patchfile &> /dev/null
+ if [ $? != 0 ] ; then
+ echo " ... patch does not apply without errors, ignoring"
+ else
+ echo "Applied $patchrev" >> $logfile
+ git apply $patchfile &> /dev/null
+ fi
+ rm $patchfile
+done
+
+sync
+echo "Quiescing for 5s..."
+sleep 5
+
+echo "Running $runscript at $rev..."
+timeoutfile=`mktemp`
+/usr/bin/time -o $timeoutfile -f "%e\nreal\t%E\nuser\t%Us\nsys\t%Ss\nmaxm\t%Mk" $runscript 2>&1 | tee -a $logfile
+exitstatus=$PIPESTATUS
+
+git reset --hard HEAD > /dev/null
+popd > /dev/null
+
+timeresult=`head -n1 $timeoutfile`
+cat $timeoutfile | tee -a $logfile
+rm $timeoutfile
+
+if [ $exitstatus != 0 ] ; then
+ # Build failed, exit with 125 to tell git bisect run to skip this rev
+ echo "*** Build failed (exit code $exitstatus), skipping..." | tee -a $logfile
+ exit 125
+fi
+
+ret=`echo "scale = 2; $timeresult > $timethreshold - $tolerance" | bc`
+echo "Returning $ret" | tee -a $logfile
+exit $ret
+
diff --git a/poky/scripts/contrib/test_build_time_worker.sh b/poky/scripts/contrib/test_build_time_worker.sh
new file mode 100755
index 000000000..8e20a9ea7
--- /dev/null
+++ b/poky/scripts/contrib/test_build_time_worker.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# This is an example script to be used in conjunction with test_build_time.sh
+
+if [ "$TEST_BUILDDIR" = "" ] ; then
+ echo "TEST_BUILDDIR is not set"
+ exit 1
+fi
+
+buildsubdir=`basename $TEST_BUILDDIR`
+if [ ! -d $buildsubdir ] ; then
+ echo "Unable to find build subdir $buildsubdir in current directory"
+ exit 1
+fi
+
+if [ -f oe-init-build-env ] ; then
+ . ./oe-init-build-env $buildsubdir
+elif [ -f poky-init-build-env ] ; then
+ . ./poky-init-build-env $buildsubdir
+else
+ echo "Unable to find build environment setup script"
+ exit 1
+fi
+
+if [ -f ../meta/recipes-sato/images/core-image-sato.bb ] ; then
+ target="core-image-sato"
+else
+ target="poky-image-sato"
+fi
+
+echo "Build started at `date "+%Y-%m-%d %H:%M:%S"`"
+echo "bitbake $target"
+bitbake $target
+ret=$?
+echo "Build finished at `date "+%Y-%m-%d %H:%M:%S"`"
+exit $ret
+
diff --git a/poky/scripts/contrib/uncovered b/poky/scripts/contrib/uncovered
new file mode 100755
index 000000000..a8399ad17
--- /dev/null
+++ b/poky/scripts/contrib/uncovered
@@ -0,0 +1,39 @@
+#!/bin/bash -eur
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Find python modules uncovered by oe-seltest
+#
+# Copyright (c) 2016, Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Author: Ed Bartosh <ed.bartosh@linux.intel.com>
+#
+
+if [ ! "$#" -eq 1 -o -t 0 ] ; then
+ echo 'Usage: coverage report | ./scripts/contrib/uncovered <dir>' 1>&2
+ exit 1
+fi
+
+path=$(readlink -ev $1)
+
+if [ ! -d "$path" ] ; then
+ echo "directory $1 doesn't exist" 1>&2
+ exit 1
+fi
+
+diff -u <(grep "$path" | grep -v '0%$' | cut -f1 -d: | sort) \
+ <(find $path | xargs file | grep 'Python script' | cut -f1 -d:| sort) | \
+ grep "^+$path" | cut -c2-
diff --git a/poky/scripts/contrib/verify-homepage.py b/poky/scripts/contrib/verify-homepage.py
new file mode 100755
index 000000000..76f1749cf
--- /dev/null
+++ b/poky/scripts/contrib/verify-homepage.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+# This script can be used to verify HOMEPAGE values for all recipes in
+# the current configuration.
+# The result is influenced by the network environment, since the default URL connection timeout is 5 seconds.
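+#
+# Run it from an initialized build environment (so that bitbake is
+# available), e.g.:
+#   $ ./scripts/contrib/verify-homepage.py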
+
+import sys
+import os
+import subprocess
+import urllib.request
+
+
+# Allow importing scripts/lib modules
+scripts_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + '/..')
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptpath
+import scriptutils
+
+# Allow importing bitbake modules
+bitbakepath = scriptpath.add_bitbake_lib_path()
+
+import bb.tinfoil
+
+logger = scriptutils.logger_create('verify_homepage')
+
+def wgetHomepage(pn, homepage):
+ result = subprocess.call('wget ' + '-q -T 5 -t 1 --spider ' + homepage, shell = True)
+ if result:
+ logger.warn("%s: failed to verify HOMEPAGE: %s " % (pn, homepage))
+ return 1
+ else:
+ return 0
+
+def verifyHomepage(bbhandler):
+ pkg_pn = bbhandler.cooker.recipecaches[''].pkg_pn
+ pnlist = sorted(pkg_pn)
+ count = 0
+ checked = []
+ for pn in pnlist:
+ for fn in pkg_pn[pn]:
+ # There's no point checking multiple BBCLASSEXTENDed variants of the same recipe
+ realfn, _, _ = bb.cache.virtualfn2realfn(fn)
+ if realfn in checked:
+ continue
+ data = bbhandler.parse_recipe_file(realfn)
+ homepage = data.getVar("HOMEPAGE")
+ if homepage:
+ try:
+ urllib.request.urlopen(homepage, timeout=5)
+ except Exception:
+ count = count + wgetHomepage(os.path.basename(realfn), homepage)
+ checked.append(realfn)
+ return count
+
+if __name__=='__main__':
+ with bb.tinfoil.Tinfoil() as bbhandler:
+ bbhandler.prepare()
+ logger.info("Start verifying HOMEPAGE:")
+ failcount = verifyHomepage(bbhandler)
+ logger.info("Finished verifying HOMEPAGE.")
+ logger.info("Summary: %s failed" % failcount)
diff --git a/poky/scripts/cp-noerror b/poky/scripts/cp-noerror
new file mode 100755
index 000000000..35eb211be
--- /dev/null
+++ b/poky/scripts/cp-noerror
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+#
+# Allow copying of $1 to $2 but if files in $1 disappear during the copy operation,
+# don't error.
+# Also don't error if $1 disappears.
+#
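+# Usage:
+#   cp-noerror <src-dir> <dst-dir>
+#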
+
+import sys
+import os
+import shutil
+
+def copytree(src, dst, symlinks=False, ignore=None):
+ """Based on shutil.copytree"""
+ names = os.listdir(src)
+ try:
+ os.makedirs(dst)
+ except OSError:
+ # Already exists
+ pass
+ errors = []
+ for name in names:
+ srcname = os.path.join(src, name)
+ dstname = os.path.join(dst, name)
+ try:
+ d = dstname
+ if os.path.isdir(dstname):
+ d = os.path.join(dstname, os.path.basename(srcname))
+ if os.path.exists(d):
+ continue
+ try:
+ os.link(srcname, dstname)
+ except OSError:
+ shutil.copy2(srcname, dstname)
+ # catch the Error from the recursive copytree so that we can
+ # continue with other files
+ except shutil.Error as err:
+ errors.extend(err.args[0])
+ except EnvironmentError as why:
+ errors.append((srcname, dstname, str(why)))
+ try:
+ shutil.copystat(src, dst)
+ except OSError as why:
+        errors.append((src, dst, str(why)))
+ if errors:
+ raise shutil.Error(errors)
+
+try:
+ copytree(sys.argv[1], sys.argv[2])
+except shutil.Error:
+ pass
+except OSError:
+ pass
diff --git a/poky/scripts/create-pull-request b/poky/scripts/create-pull-request
new file mode 100755
index 000000000..280880b3f
--- /dev/null
+++ b/poky/scripts/create-pull-request
@@ -0,0 +1,303 @@
+#!/bin/sh
+#
+# Copyright (c) 2010-2013, Intel Corporation.
+# All Rights Reserved
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+#
+# This script is intended to be used to prepare a series of patches
+# and a cover letter in an appropriate and consistent format for
+# submission to Open Embedded and The Yocto Project, as well as to
+# related projects and layers.
+#
+
+ODIR=pull-$$
+RELATIVE_TO="master"
+COMMIT_ID="HEAD"
+PREFIX="PATCH"
+RFC=0
+
+usage() {
+CMD=$(basename $0)
+cat <<EOM
+Usage: $CMD [-h] [-o output_dir] [-m msg_body_file] [-s subject] [-r relative_to] [-i commit_id] [-d relative_dir] -u remote [-b branch] [-- <format-patch options>]
+ -b branch Branch name in the specified remote (default: current branch)
+ -l local branch Local branch name (default: HEAD)
+ -c Create an RFC (Request for Comment) patch series
+ -h Display this help message
+ -a Automatically push local branch (-l) to remote branch (-b),
+ or set CPR_CONTRIB_AUTO_PUSH in env
+ -i commit_id Ending commit (default: HEAD)
+ -m msg_body_file The file containing a blurb to be inserted into the summary email
+ -o output_dir Specify the output directory for the messages (default: pull-PID)
+ -p prefix Use [prefix N/M] instead of [PATCH N/M] as the subject prefix
+ -r relative_to Starting commit (default: master)
+ -s subject The subject to be inserted into the summary email
+ -u remote The git remote where the branch is located, or set CPR_CONTRIB_REMOTE in env
+ -d relative_dir Generate patches relative to directory
+
+ Examples:
+ $CMD -u contrib -b nitin/basic
+ $CMD -u contrib -r distro/master -i nitin/distro -b nitin/distro
+ $CMD -u contrib -r distro/master -i nitin/distro -b nitin/distro -l distro
+ $CMD -u contrib -r master -i misc -b nitin/misc -o pull-misc
+ $CMD -u contrib -p "RFC PATCH" -b nitin/experimental
+ $CMD -u contrib -i misc -b nitin/misc -d ./bitbake
+ $CMD -u contrib -r origin/master -o /tmp/out.v3 -- -v3 --in-reply-to=20170511120134.XX7799@site.com
+EOM
+}
+
+REMOTE="$CPR_CONTRIB_REMOTE"
+# Parse and validate arguments
+while getopts "b:acd:hi:m:o:p:r:s:u:l:" OPT; do
+ case $OPT in
+ b)
+ BRANCH="$OPTARG"
+ ;;
+ l)
+ L_BRANCH="$OPTARG"
+ ;;
+ c)
+ RFC=1
+ ;;
+ d)
+ RELDIR="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ i)
+ COMMIT_ID="$OPTARG"
+ ;;
+ m)
+ BODY="$OPTARG"
+ if [ ! -e "$BODY" ]; then
+ echo "ERROR: Body file does not exist"
+ exit 1
+ fi
+ ;;
+ o)
+ ODIR="$OPTARG"
+ ;;
+ p)
+ PREFIX="$OPTARG"
+ ;;
+ r)
+ RELATIVE_TO="$OPTARG"
+ ;;
+ s)
+ SUBJECT="$OPTARG"
+ ;;
+ u)
+ REMOTE="$OPTARG"
+ ;;
+ a)
+ CPR_CONTRIB_AUTO_PUSH="1"
+ ;;
+ --)
+ shift
+ break
+ ;;
+ esac
+done
+
+shift "$((OPTIND - 1))"
+extraopts="$@"
+
+if [ -z "$REMOTE" ]; then
+ echo "ERROR: Missing parameter -u or CPR_CONTRIB_REMOTE in env, no git remote!"
+ usage
+ exit 1
+fi
+
+REMOTE_URL=$(git config remote.$REMOTE.url)
+if [ $? -ne 0 ]; then
+ echo "ERROR: git config failed to find a url for '$REMOTE'"
+ echo
+ echo "To add a remote url for $REMOTE, use:"
+ echo " git config remote.$REMOTE.url <url>"
+ exit 1
+fi
+
+# Rewrite private URLs to public URLs
+# Determine the repository name for use in the WEB_URL later
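+# e.g. a hypothetical "git@git.yoctoproject.org:poky-contrib.git" becomes
+# "git://git.yoctoproject.org/poky-contrib", with REMOTE_REPO set to
+# "poky-contrib"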
+case "$REMOTE_URL" in
+*@*)
+ USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
+ PROTO_RE="[a-z][a-z+]*://"
+ GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
+ REMOTE_URL=${REMOTE_URL%.git}
+ REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
+ REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
+ ;;
+*)
+ echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
+ echo " The pull and browse URLs will likely be incorrect"
+ ;;
+esac
+
+if [ -z "$BRANCH" ]; then
+ BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
+ echo "NOTE: Assuming remote branch '$BRANCH', use -b to override."
+fi
+
+if [ -z "$L_BRANCH" ]; then
+ L_BRANCH=HEAD
+ echo "NOTE: Assuming local branch HEAD, use -l to override."
+fi
+
+if [ $RFC -eq 1 ]; then
+ PREFIX="RFC $PREFIX"
+fi
+
+
+# Set WEB_URL from known remotes
+WEB_URL=""
+case "$REMOTE_URL" in
+ *git.yoctoproject.org*)
+ WEB_URL="http://git.yoctoproject.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ ;;
+ *git.pokylinux.org*)
+ WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ ;;
+ *git.openembedded.org*)
+ WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
+ ;;
+ *github.com*)
+ WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
+ ;;
+esac
+
+# Perform a sanity test on the web URL. Issue a warning if it is not
+# accessible, but do not abort as users may want to run offline.
+if [ -n "$WEB_URL" ]; then
+ if [ "$CPR_CONTRIB_AUTO_PUSH" = "1" ]; then
+ echo "Pushing '$BRANCH' on '$REMOTE' as requested..."
+ git push $REMOTE $L_BRANCH:$BRANCH
+ echo ""
+ fi
+ wget --no-check-certificate -q $WEB_URL -O /dev/null
+ if [ $? -ne 0 ]; then
+ echo "WARNING: Branch '$BRANCH' was not found on the contrib git tree."
+ echo " Please check your remote and branch parameter before sending."
+ echo ""
+ fi
+fi
+
+if [ -e $ODIR ]; then
+ echo "ERROR: output directory $ODIR exists."
+ exit 1
+fi
+mkdir $ODIR
+
+if [ -n "$RELDIR" ]; then
+ ODIR=$(realpath $ODIR)
+ pdir=$(pwd)
+ cd $RELDIR
+ extraopts="$extraopts --relative"
+fi
+
+# Generate the patches and cover letter
+git format-patch $extraopts -M40 --subject-prefix="$PREFIX" -n -o $ODIR --thread=shallow --cover-letter $RELATIVE_TO..$COMMIT_ID > /dev/null
+
+if [ -z "$(ls -A $ODIR 2> /dev/null)" ]; then
+ echo "ERROR: $ODIR is empty, no cover letter and patches was generated!"
+ echo " This is most likely due to that \$RRELATIVE_TO..\$COMMIT_ID"
+ echo " ($RELATIVE_TO..$COMMIT_ID) don't contain any differences."
+ rmdir $ODIR
+ exit 1
+fi
+
+[ -n "$RELDIR" ] && cd $pdir
+
+# Customize the cover letter
+CL="$(echo $ODIR/*0000-cover-letter.patch)"
+PM="$ODIR/pull-msg"
+GIT_VERSION=$(git --version | tr -d '[:alpha:][:space:].' | sed 's/\(...\).*/\1/')
+NEWER_GIT_VERSION=210
+if [ $GIT_VERSION -lt $NEWER_GIT_VERSION ]; then
+ git request-pull $RELATIVE_TO $REMOTE_URL $COMMIT_ID >> "$PM"
+else
+ git request-pull $RELATIVE_TO $REMOTE_URL $L_BRANCH:$BRANCH >> "$PM"
+fi
+if [ $? -ne 0 ]; then
+ echo "ERROR: git request-pull reported an error"
+ rm -rf $ODIR
+ exit 1
+fi
+
+# The cover letter already has a diffstat, remove it from the pull-msg
+# before inserting it.
+sed -n "0,\#$REMOTE_URL# p" "$PM" | sed -i "/BLURB HERE/ r /dev/stdin" "$CL"
+rm "$PM"
+
+# If this is an RFC, make that clear in the cover letter
+if [ $RFC -eq 1 ]; then
+(cat <<EOM
+Please review the following changes for suitability for inclusion. If you have
+any objections or suggestions for improvement, please respond to the patches. If
+you agree with the changes, please provide your Acked-by.
+
+EOM
+) | sed -i "/BLURB HERE/ r /dev/stdin" "$CL"
+fi
+
+# Insert the WEB_URL if there is one
+if [ -n "$WEB_URL" ]; then
+ echo " $WEB_URL" | sed -i "\#$REMOTE_URL# r /dev/stdin" "$CL"
+fi
+
+
+# If the user specified a message body, insert it into the cover letter and
+# remove the BLURB token.
+if [ -n "$BODY" ]; then
+ sed -i "/BLURB HERE/ r $BODY" "$CL"
+ sed -i "/BLURB HERE/ d" "$CL"
+fi
+
+# Set subject automatically if there is only one patch
+patch_cnt=`git log --pretty=oneline ${RELATIVE_TO}..${L_BRANCH} | wc -l`
+if [ -z "$SUBJECT" -a $patch_cnt -eq 1 ]; then
+ SUBJECT="`git log --format=%s ${RELATIVE_TO}..${L_BRANCH}`"
+fi
+
+# Replace the SUBJECT token with it.
+if [ -n "$SUBJECT" ]; then
+ sed -i -e "s/\*\*\* SUBJECT HERE \*\*\*/$SUBJECT/" "$CL"
+fi
+
+
+# Generate report for user
+cat <<EOM
+The following patches have been prepared:
+$(for PATCH in $(ls $ODIR/*); do echo " $PATCH"; done)
+
+Review their content, especially the summary mail:
+ $CL
+
+When you are satisfied, you can send them with:
+ send-pull-request -a -p $ODIR
+EOM
+
+# Check the patches for trailing white space
+egrep -q -e "^\+.*\s+$" $ODIR/*
+if [ $? -ne 1 ]; then
+ echo
+ echo "WARNING: Trailing white space detected at these locations"
+ egrep -nH --color -e "^\+.*\s+$" $ODIR/*
+fi
diff --git a/poky/scripts/crosstap b/poky/scripts/crosstap
new file mode 100755
index 000000000..e33fa4ad4
--- /dev/null
+++ b/poky/scripts/crosstap
@@ -0,0 +1,469 @@
+#!/usr/bin/env python3
+#
+# Build a systemtap script for a given image and kernel
+#
+# Effectively the script extracts the needed information from a set of
+# 'bitbake -e' commands and constructs a proper invocation of stap on the
+# host to build a systemtap script for a given target.
+#
+# By default the script compiles scriptname.ko, which can be copied to the
+# target and activated with the 'staprun scriptname.ko' command. If the
+# --remote user@hostname option is specified, the script will instead
+# build, load and execute the script on the target.
+#
+# This script is very similar to, and inspired by, the crosstap shell
+# script. The major difference is that this script supports user-land
+# systemtap scripts, whereas crosstap could only deal with scripts related
+# to the kernel.
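+#
+# Example (hypothetical script and image names):
+#   crosstap -s probes/syscalls.stp -i core-image-minimal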
+#
+# Copyright (c) 2018, Cisco Systems.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import sys
+import re
+import subprocess
+import os
+import optparse
+
+class Stap(object):
+ def __init__(self, script, module, remote):
+ self.script = script
+ self.module = module
+ self.remote = remote
+ self.stap = None
+ self.sysroot = None
+ self.runtime = None
+ self.tapset = None
+ self.arch = None
+ self.cross_compile = None
+ self.kernel_release = None
+ self.target_path = None
+ self.target_ld_library_path = None
+
+ if not self.remote:
+ if not self.module:
+ # derive module name from script
+ self.module = os.path.basename(self.script)
+ if self.module[-4:] == ".stp":
+ self.module = self.module[:-4]
+ # replace - if any with _
+ self.module = self.module.replace("-", "_")
+
+ def command(self, args):
+ ret = []
+ ret.append(self.stap)
+
+ if self.remote:
+ ret.append("--remote")
+ ret.append(self.remote)
+ else:
+ ret.append("-p4")
+ ret.append("-m")
+ ret.append(self.module)
+
+ ret.append("-a")
+ ret.append(self.arch)
+
+ ret.append("-B")
+ ret.append("CROSS_COMPILE=" + self.cross_compile)
+
+ ret.append("-r")
+ ret.append(self.kernel_release)
+
+ ret.append("-I")
+ ret.append(self.tapset)
+
+ ret.append("-R")
+ ret.append(self.runtime)
+
+ if self.sysroot:
+ ret.append("--sysroot")
+ ret.append(self.sysroot)
+
+ ret.append("--sysenv=PATH=" + self.target_path)
+ ret.append("--sysenv=LD_LIBRARY_PATH=" + self.target_ld_library_path)
+
+ ret = ret + args
+
+ ret.append(self.script)
+ return ret
+
+ def additional_environment(self):
+ ret = {}
+ ret["SYSTEMTAP_DEBUGINFO_PATH"] = "+:.debug:build"
+ return ret
+
+ def environment(self):
+ ret = os.environ.copy()
+ additional = self.additional_environment()
+ for e in additional:
+ ret[e] = additional[e]
+ return ret
+
+ def display_command(self, args):
+ additional_env = self.additional_environment()
+ command = self.command(args)
+
+ print("#!/bin/sh")
+ for e in additional_env:
+ print("export %s=\"%s\"" % (e, additional_env[e]))
+ print(" ".join(command))
+
+class BitbakeEnvInvocationException(Exception):
+ def __init__(self, message):
+ self.message = message
+
+class BitbakeEnv(object):
+ BITBAKE="bitbake"
+
+ def __init__(self, package):
+ self.package = package
+ self.cmd = BitbakeEnv.BITBAKE + " -e " + self.package
+ self.popen = subprocess.Popen(self.cmd, shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ self.__lines = self.popen.stdout.readlines()
+ self.popen.wait()
+
+ self.lines = []
+ for line in self.__lines:
+ self.lines.append(line.decode('utf-8'))
+
+ def get_vars(self, vars):
+ if self.popen.returncode:
+ raise BitbakeEnvInvocationException(
+ "\nFailed to execute '" + self.cmd +
+ "' with the following message:\n" +
+ ''.join(self.lines))
+
+ search_patterns = []
+ retdict = {}
+ for var in vars:
+ # regular not exported variable
+ rexpr = "^" + var + "=\"(.*)\""
+ re_compiled = re.compile(rexpr)
+ search_patterns.append((var, re_compiled))
+
+ # exported variable
+ rexpr = "^export " + var + "=\"(.*)\""
+ re_compiled = re.compile(rexpr)
+ search_patterns.append((var, re_compiled))
+
+ for line in self.lines:
+ for var, rexpr in search_patterns:
+ m = rexpr.match(line)
+ if m:
+ value = m.group(1)
+ retdict[var] = value
+
+ # fill variables values in order how they were requested
+ ret = []
+ for var in vars:
+ ret.append(retdict.get(var))
+
+ # if it is single value list return it as scalar, not the list
+ if len(ret) == 1:
+ ret = ret[0]
+
+ return ret
+
+class ParamDiscovery(object):
+ SYMBOLS_CHECK_MESSAGE = """
+WARNING: image '%s' does not have the dbg-pkgs IMAGE_FEATURES enabled and
+"image-combined-dbg" is not among its inherited classes. As a result the
+image does not have the symbols needed for DWARF-based probes of user-land
+processes. Consider adding 'dbg-pkgs' to EXTRA_IMAGE_FEATURES or adding
+"image-combined-dbg" to USER_CLASSES, i.e. add the line
+'USER_CLASSES += "image-combined-dbg"' to the local.conf file.
+
+Alternatively you may use the IMAGE_GEN_DEBUGFS="1" option; after the build
+you then need to recombine/unpack the image and image-dbg tarballs and pass
+the resulting directory location with the --sysroot option.
+"""
+
+ def __init__(self, image):
+ self.image = image
+
+ self.image_rootfs = None
+ self.image_features = None
+ self.image_gen_debugfs = None
+ self.inherit = None
+ self.base_bindir = None
+ self.base_sbindir = None
+ self.base_libdir = None
+ self.bindir = None
+ self.sbindir = None
+ self.libdir = None
+
+ self.staging_bindir_toolchain = None
+ self.target_prefix = None
+ self.target_arch = None
+ self.target_kernel_builddir = None
+
+ self.staging_dir_native = None
+
+ self.image_combined_dbg = False
+
+ def discover(self):
+ if self.image:
+ benv_image = BitbakeEnv(self.image)
+ (self.image_rootfs,
+ self.image_features,
+ self.image_gen_debugfs,
+ self.inherit,
+ self.base_bindir,
+ self.base_sbindir,
+ self.base_libdir,
+ self.bindir,
+ self.sbindir,
+ self.libdir
+ ) = benv_image.get_vars(
+ ("IMAGE_ROOTFS",
+ "IMAGE_FEATURES",
+ "IMAGE_GEN_DEBUGFS",
+ "INHERIT",
+ "base_bindir",
+ "base_sbindir",
+ "base_libdir",
+ "bindir",
+ "sbindir",
+ "libdir"
+ ))
+
+ benv_kernel = BitbakeEnv("virtual/kernel")
+ (self.staging_bindir_toolchain,
+ self.target_prefix,
+ self.target_arch,
+ self.target_kernel_builddir
+ ) = benv_kernel.get_vars(
+ ("STAGING_BINDIR_TOOLCHAIN",
+ "TARGET_PREFIX",
+ "TRANSLATED_TARGET_ARCH",
+ "B"
+ ))
+
+ benv_systemtap = BitbakeEnv("systemtap-native")
+ (self.staging_dir_native
+ ) = benv_systemtap.get_vars(["STAGING_DIR_NATIVE"])
+
+ if self.inherit:
+ if "image-combined-dbg" in self.inherit.split():
+ self.image_combined_dbg = True
+
+ def check(self, sysroot_option):
+ ret = True
+ if self.image_rootfs:
+ sysroot = self.image_rootfs
+ if not os.path.isdir(self.image_rootfs):
+ print("ERROR: Cannot find '" + sysroot +
+ "' directory. Was '" + self.image + "' image built?")
+ ret = False
+
+ stap = self.staging_dir_native + "/usr/bin/stap"
+ if not os.path.isfile(stap):
+ print("ERROR: Cannot find '" + stap +
+ "'. Was 'systemtap-native' built?")
+ ret = False
+
+ if not os.path.isdir(self.target_kernel_builddir):
+ print("ERROR: Cannot find '" + self.target_kernel_builddir +
+ "' directory. Was 'kernel/virtual' built?")
+ ret = False
+
+ if not sysroot_option and self.image_rootfs:
+ dbg_pkgs_found = False
+
+ if self.image_features:
+ image_features = self.image_features.split()
+ if "dbg-pkgs" in image_features:
+ dbg_pkgs_found = True
+
+ if not dbg_pkgs_found \
+ and not self.image_combined_dbg:
+ print(ParamDiscovery.SYMBOLS_CHECK_MESSAGE % (self.image))
+
+ if not ret:
+ print("")
+
+ return ret
+
+ def __map_systemtap_arch(self):
+ a = self.target_arch
+ ret = a
+ if re.match('(athlon|x86.64)$', a):
+ ret = 'x86_64'
+ elif re.match('i.86$', a):
+ ret = 'i386'
+ elif re.match('arm$', a):
+ ret = 'arm'
+ elif re.match('aarch64$', a):
+ ret = 'arm64'
+ elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a):
+ ret = 'mips'
+ elif re.match('p(pc|owerpc)(|64)', a):
+ ret = 'powerpc'
+ return ret
+
+ def fill_stap(self, stap):
+ stap.stap = self.staging_dir_native + "/usr/bin/stap"
+ if not stap.sysroot:
+ if self.image_rootfs:
+ if self.image_combined_dbg:
+ stap.sysroot = self.image_rootfs + "-dbg"
+ else:
+ stap.sysroot = self.image_rootfs
+ stap.runtime = self.staging_dir_native + "/usr/share/systemtap/runtime"
+ stap.tapset = self.staging_dir_native + "/usr/share/systemtap/tapset"
+ stap.arch = self.__map_systemtap_arch()
+ stap.cross_compile = self.staging_bindir_toolchain + "/" + \
+ self.target_prefix
+ stap.kernel_release = self.target_kernel_builddir
+
+        # is there a standard that says in which order these need to appear?
+ target_path = []
+ if self.sbindir:
+ target_path.append(self.sbindir)
+ if self.bindir:
+ target_path.append(self.bindir)
+ if self.base_sbindir:
+ target_path.append(self.base_sbindir)
+ if self.base_bindir:
+ target_path.append(self.base_bindir)
+ stap.target_path = ":".join(target_path)
+
+ target_ld_library_path = []
+ if self.libdir:
+ target_ld_library_path.append(self.libdir)
+ if self.base_libdir:
+ target_ld_library_path.append(self.base_libdir)
+ stap.target_ld_library_path = ":".join(target_ld_library_path)
+
+
+def main():
+ usage = """usage: %prog -s <systemtap-script> [options] [-- [systemtap options]]
+
+%prog cross-compiles a given SystemTap script against a given image and kernel
+
+It needs to run in an environment set up for bitbake - it uses 'bitbake -e'
+invocations to retrieve the information needed to construct a proper stap
+cross-build invocation. It assumes that systemtap-native is built in the
+given bitbake workspace.
+
+Anything after the -- option is passed directly to stap.
+
+The legacy script invocation style is supported but deprecated:
+  %prog <user@hostname> <systemtap-script> [systemtap options]
+
+To get the most out of systemtap the following site.conf or local.conf
+configuration is recommended:
+
+# enables symbol + target binaries rootfs-dbg in workspace
+IMAGE_GEN_DEBUGFS = "1"
+IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
+USER_CLASSES += "image-combined-dbg"
+
+# enables kernel debug symbols
+KERNEL_EXTRA_FEATURES_append = " features/debug/debug-kernel.scc"
+
+# minimal, just run-time systemtap configuration in target image
+PACKAGECONFIG_pn-systemtap = "monitor"
+
+# add systemtap run-time into target image if it is not there yet
+IMAGE_INSTALL_append = " systemtap"
+"""
+ option_parser = optparse.OptionParser(usage=usage)
+
+ option_parser.add_option("-s", "--script", dest="script",
+ help="specify input script FILE name",
+ metavar="FILE")
+
+ option_parser.add_option("-i", "--image", dest="image",
+ help="specify image name for which script should be compiled")
+
+ option_parser.add_option("-r", "--remote", dest="remote",
+ help="specify username@hostname of remote target to run script "
+ "optional, it assumes that remote target can be accessed through ssh")
+
+ option_parser.add_option("-m", "--module", dest="module",
+ help="specify module name, optional, has effect only if --remote is not used, "
+ "if not specified module name will be derived from passed script name")
+
+ option_parser.add_option("-y", "--sysroot", dest="sysroot",
+ help="explicitely specify image sysroot location. May need to use it in case "
+ "when IMAGE_GEN_DEBUGFS=\"1\" option is used and recombined with symbols "
+ "in different location",
+ metavar="DIR")
+
+ option_parser.add_option("-o", "--out", dest="out",
+ action="store_true",
+ help="output shell script that equvivalent invocation of this script with "
+ "given set of arguments, in given bitbake environment. It could be stored in "
+ "separate shell script and could be repeated without incuring bitbake -e "
+ "invocation overhead",
+ default=False)
+
+ option_parser.add_option("-d", "--debug", dest="debug",
+ action="store_true",
+ help="enable debug output. Use this option to see resulting stap invocation",
+ default=False)
+
+    # check whether the invocation follows the syntax of the original crosstap shell script
+ legacy_args = False
+
+ # check if we called the legacy way
+ if len(sys.argv) >= 3:
+ if sys.argv[1].find("@") != -1 and os.path.exists(sys.argv[2]):
+ legacy_args = True
+
+ # fill options values for legacy invocation case
+    options = optparse.Values()
+ options.script = sys.argv[2]
+ options.remote = sys.argv[1]
+ options.image = None
+ options.module = None
+ options.sysroot = None
+ options.out = None
+ options.debug = None
+ remaining_args = sys.argv[3:]
+
+ if not legacy_args:
+ (options, remaining_args) = option_parser.parse_args()
+
+ if not options.script or not os.path.exists(options.script):
+        print("'-s FILE' option is missing or FILE does not exist\n")
+ option_parser.print_help()
+ else:
+ stap = Stap(options.script, options.module, options.remote)
+ discovery = ParamDiscovery(options.image)
+ discovery.discover()
+ if not discovery.check(options.sysroot):
+ option_parser.print_help()
+ else:
+ stap.sysroot = options.sysroot
+ discovery.fill_stap(stap)
+
+ if options.out:
+ stap.display_command(remaining_args)
+ else:
+ cmd = stap.command(remaining_args)
+ env = stap.environment()
+
+ if options.debug:
+ print(" ".join(cmd))
+
+ os.execve(cmd[0], cmd, env)
+
+main()
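
A hypothetical invocation sketch (script, image and target names are all
illustrative; assumes the bitbake environment has been initialised, e.g. via
oe-init-build-env):

    crosstap -s trace_open.stp -i core-image-minimal -r root@192.168.7.2 -- -v

Everything after the -- separator is passed through to stap unchanged.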
diff --git a/poky/scripts/devtool b/poky/scripts/devtool
new file mode 100755
index 000000000..d681a1929
--- /dev/null
+++ b/poky/scripts/devtool
@@ -0,0 +1,349 @@
+#!/usr/bin/env python3
+
+# OpenEmbedded Development tool
+#
+# Copyright (C) 2014-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import re
+import configparser
+import subprocess
+import logging
+
+basepath = ''
+workspace = {}
+config = None
+context = None
+
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+from devtool import DevtoolError, setup_tinfoil
+import scriptutils
+import argparse_oe
+logger = scriptutils.logger_create('devtool')
+
+plugins = []
+
+
+class ConfigHandler(object):
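+    # Backed by conf/devtool.conf in the build directory. A minimal config
+    # (illustrative values) looks like:
+    #   [General]
+    #   workspace_path = /path/to/build/workspace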
+ config_file = ''
+ config_obj = None
+ init_path = ''
+ workspace_path = ''
+
+ def __init__(self, filename):
+ self.config_file = filename
+ self.config_obj = configparser.SafeConfigParser()
+
+ def get(self, section, option, default=None):
+ try:
+ ret = self.config_obj.get(section, option)
+ except (configparser.NoOptionError, configparser.NoSectionError):
+            if default is not None:
+ ret = default
+ else:
+ raise
+ return ret
+
+ def read(self):
+ if os.path.exists(self.config_file):
+ self.config_obj.read(self.config_file)
+
+ if self.config_obj.has_option('General', 'init_path'):
+ pth = self.get('General', 'init_path')
+ self.init_path = os.path.join(basepath, pth)
+ if not os.path.exists(self.init_path):
+ logger.error('init_path %s specified in config file cannot be found' % pth)
+ return False
+ else:
+ self.config_obj.add_section('General')
+
+ self.workspace_path = self.get('General', 'workspace_path', os.path.join(basepath, 'workspace'))
+ return True
+
+ def write(self):
+ logger.debug('writing to config file %s' % self.config_file)
+ self.config_obj.set('General', 'workspace_path', self.workspace_path)
+ with open(self.config_file, 'w') as f:
+ self.config_obj.write(f)
+
+ def set(self, section, option, value):
+ if not self.config_obj.has_section(section):
+ self.config_obj.add_section(section)
+ self.config_obj.set(section, option, value)
+
+class Context:
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+
+def read_workspace():
+ global workspace
+ workspace = {}
+ if not os.path.exists(os.path.join(config.workspace_path, 'conf', 'layer.conf')):
+ if context.fixed_setup:
+ logger.error("workspace layer not set up")
+ sys.exit(1)
+ else:
+ logger.info('Creating workspace layer in %s' % config.workspace_path)
+ _create_workspace(config.workspace_path, config, basepath)
+ if not context.fixed_setup:
+ _enable_workspace_layer(config.workspace_path, config, basepath)
+
+ logger.debug('Reading workspace in %s' % config.workspace_path)
+ externalsrc_re = re.compile(r'^EXTERNALSRC(_pn-([^ =]+))? *= *"([^"]*)"$')
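+    # Matches the assignments devtool writes into workspace bbappends,
+    # e.g. (illustrative path): EXTERNALSRC_pn-foo = "/home/user/src/foo"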
+ for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')):
+ with open(fn, 'r') as f:
+ pnvalues = {}
+ for line in f:
+ res = externalsrc_re.match(line.rstrip())
+ if res:
+ recipepn = os.path.splitext(os.path.basename(fn))[0].split('_')[0]
+ pn = res.group(2) or recipepn
+ # Find the recipe file within the workspace, if any
+ bbfile = os.path.basename(fn).replace('.bbappend', '.bb').replace('%', '*')
+ recipefile = glob.glob(os.path.join(config.workspace_path,
+ 'recipes',
+ recipepn,
+ bbfile))
+ if recipefile:
+ recipefile = recipefile[0]
+ pnvalues['srctree'] = res.group(3)
+ pnvalues['bbappend'] = fn
+ pnvalues['recipefile'] = recipefile
+ elif line.startswith('# srctreebase: '):
+ pnvalues['srctreebase'] = line.split(':', 1)[1].strip()
+ if pnvalues:
+ if not pnvalues.get('srctreebase', None):
+ pnvalues['srctreebase'] = pnvalues['srctree']
+ logger.debug('Found recipe %s' % pnvalues)
+ workspace[pn] = pnvalues
+
+def create_workspace(args, config, basepath, workspace):
+ if args.layerpath:
+ workspacedir = os.path.abspath(args.layerpath)
+ else:
+ workspacedir = os.path.abspath(os.path.join(basepath, 'workspace'))
+ _create_workspace(workspacedir, config, basepath)
+ if not args.create_only:
+ _enable_workspace_layer(workspacedir, config, basepath)
+
+def _create_workspace(workspacedir, config, basepath):
+ import bb
+
+ confdir = os.path.join(workspacedir, 'conf')
+ if os.path.exists(os.path.join(confdir, 'layer.conf')):
+ logger.info('Specified workspace already set up, leaving as-is')
+ else:
+ # Add a config file
+ bb.utils.mkdirhier(confdir)
+ with open(os.path.join(confdir, 'layer.conf'), 'w') as f:
+ f.write('# ### workspace layer auto-generated by devtool ###\n')
+ f.write('BBPATH =. "$' + '{LAYERDIR}:"\n')
+ f.write('BBFILES += "$' + '{LAYERDIR}/recipes/*/*.bb \\\n')
+ f.write(' $' + '{LAYERDIR}/appends/*.bbappend"\n')
+ f.write('BBFILE_COLLECTIONS += "workspacelayer"\n')
+ f.write('BBFILE_PATTERN_workspacelayer = "^$' + '{LAYERDIR}/"\n')
+ f.write('BBFILE_PATTERN_IGNORE_EMPTY_workspacelayer = "1"\n')
+ f.write('BBFILE_PRIORITY_workspacelayer = "99"\n')
+ f.write('LAYERSERIES_COMPAT_workspacelayer = "${LAYERSERIES_COMPAT_core}"\n')
+ # Add a README file
+ with open(os.path.join(workspacedir, 'README'), 'w') as f:
+ f.write('This layer was created by the OpenEmbedded devtool utility in order to\n')
+ f.write('contain recipes and bbappends that are currently being worked on. The idea\n')
+            f.write('is that the contents are temporary - once you have finished working on a\n')
+ f.write('recipe you use the appropriate method to move the files you have been\n')
+ f.write('working on to a proper layer. In most instances you should use the\n')
+ f.write('devtool utility to manage files within it rather than modifying files\n')
+            f.write('directly (although recipes added with "devtool add" will often need\n')
+            f.write('direct modification).\n')
+ f.write('\nIf you no longer need to use devtool or the workspace layer\'s contents\n')
+ f.write('you can remove the path to this workspace layer from your conf/bblayers.conf\n')
+ f.write('file (and then delete the layer, if you wish).\n')
+ f.write('\nNote that by default, if devtool fetches and unpacks source code, it\n')
+ f.write('will place it in a subdirectory of a "sources" subdirectory of the\n')
+ f.write('layer. If you prefer it to be elsewhere you can specify the source\n')
+ f.write('tree path on the command line.\n')
+
+def _enable_workspace_layer(workspacedir, config, basepath):
+ """Ensure the workspace layer is in bblayers.conf"""
+ import bb
+ bblayers_conf = os.path.join(basepath, 'conf', 'bblayers.conf')
+ if not os.path.exists(bblayers_conf):
+ logger.error('Unable to find bblayers.conf')
+ return
+ if os.path.abspath(workspacedir) != os.path.abspath(config.workspace_path):
+ removedir = config.workspace_path
+ else:
+ removedir = None
+ _, added = bb.utils.edit_bblayers_conf(bblayers_conf, workspacedir, removedir)
+ if added:
+ logger.info('Enabling workspace layer in bblayers.conf')
+ if config.workspace_path != workspacedir:
+ # Update our config to point to the new location
+ config.workspace_path = workspacedir
+ config.write()
+
+
+def main():
+ global basepath
+ global config
+ global context
+
+ if sys.getfilesystemencoding() != "utf-8":
+        sys.exit("Please use a locale setting which supports UTF-8.\nPython can't change the filesystem locale after loading, so we need a UTF-8 locale when Python starts or things won't work.")
+
+ context = Context(fixed_setup=False)
+
+ # Default basepath
+ basepath = os.path.dirname(os.path.abspath(__file__))
+
+ parser = argparse_oe.ArgumentParser(description="OpenEmbedded development tool",
+ add_help=False,
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('--basepath', help='Base directory of SDK / build directory')
+ parser.add_argument('--bbpath', help='Explicitly specify the BBPATH, rather than getting it from the metadata')
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')
+
+ global_args, unparsed_args = parser.parse_known_args()
+
+ # Help is added here rather than via add_help=True, as we don't want it to
+ # be handled by parse_known_args()
+ parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ if global_args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif global_args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ if global_args.basepath:
+ # Override
+ basepath = global_args.basepath
+ if os.path.exists(os.path.join(basepath, '.devtoolbase')):
+ context.fixed_setup = True
+ else:
+ pth = basepath
+ while pth != '' and pth != os.sep:
+ if os.path.exists(os.path.join(pth, '.devtoolbase')):
+ context.fixed_setup = True
+ basepath = pth
+ break
+ pth = os.path.dirname(pth)
+
+ if not context.fixed_setup:
+ basepath = os.environ.get('BUILDDIR')
+ if not basepath:
+ logger.error("This script can only be run after initialising the build environment (e.g. by using oe-init-build-env)")
+ sys.exit(1)
+
+ logger.debug('Using basepath %s' % basepath)
+
+ config = ConfigHandler(os.path.join(basepath, 'conf', 'devtool.conf'))
+ if not config.read():
+ return -1
+ context.config = config
+
+ bitbake_subdir = config.get('General', 'bitbake_subdir', '')
+ if bitbake_subdir:
+ # Normally set for use within the SDK
+ logger.debug('Using bitbake subdir %s' % bitbake_subdir)
+ sys.path.insert(0, os.path.join(basepath, bitbake_subdir, 'lib'))
+ core_meta_subdir = config.get('General', 'core_meta_subdir')
+ sys.path.insert(0, os.path.join(basepath, core_meta_subdir, 'lib'))
+ else:
+ # Standard location
+ import scriptpath
+ bitbakepath = scriptpath.add_bitbake_lib_path()
+ if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+ logger.debug('Using standard bitbake path %s' % bitbakepath)
+ scriptpath.add_oe_lib_path()
+
+ scriptutils.logger_setup_color(logger, global_args.color)
+
+ if global_args.bbpath is None:
+ try:
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ global_args.bbpath = tinfoil.config_data.getVar('BBPATH')
+ finally:
+ tinfoil.shutdown()
+ except bb.BBHandledException:
+ return 2
+
+ # Search BBPATH first to allow layers to override plugins in scripts_path
+ for path in global_args.bbpath.split(':') + [scripts_path]:
+ pluginpath = os.path.join(path, 'lib', 'devtool')
+ scriptutils.load_plugins(logger, plugins, pluginpath)
+
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ subparsers.add_subparser_group('sdk', 'SDK maintenance', -2)
+ subparsers.add_subparser_group('advanced', 'Advanced', -1)
+ subparsers.add_subparser_group('starting', 'Beginning work on a recipe', 100)
+ subparsers.add_subparser_group('info', 'Getting information')
+ subparsers.add_subparser_group('working', 'Working on a recipe in the workspace')
+ subparsers.add_subparser_group('testbuild', 'Testing changes on target')
+
+ if not context.fixed_setup:
+ parser_create_workspace = subparsers.add_parser('create-workspace',
+ help='Set up workspace in an alternative location',
+ description='Sets up a new workspace. NOTE: other devtool subcommands will create a workspace automatically as needed, so you only need to use %(prog)s if you want to specify where the workspace should be located.',
+ group='advanced')
+ parser_create_workspace.add_argument('layerpath', nargs='?', help='Path in which the workspace layer should be created')
+ parser_create_workspace.add_argument('--create-only', action="store_true", help='Only create the workspace layer, do not alter configuration')
+ parser_create_workspace.set_defaults(func=create_workspace, no_workspace=True)
+
+ for plugin in plugins:
+ if hasattr(plugin, 'register_commands'):
+ plugin.register_commands(subparsers, context)
+
+ args = parser.parse_args(unparsed_args, namespace=global_args)
+
+ if not getattr(args, 'no_workspace', False):
+ read_workspace()
+
+ try:
+ ret = args.func(args, config, basepath, workspace)
+ except DevtoolError as err:
+ if str(err):
+ logger.error(str(err))
+ ret = err.exitcode
+ except argparse_oe.ArgumentUsageError as ae:
+ parser.error_subcommand(ae.message, ae.subcommand)
+
+ return ret
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/distro/build-recipe-list.py b/poky/scripts/distro/build-recipe-list.py
new file mode 100755
index 000000000..216276485
--- /dev/null
+++ b/poky/scripts/distro/build-recipe-list.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+
+import os
+import shutil
+import csv
+import sys
+import argparse
+
+__version__ = "0.1.0"
+
+# set of BPNs
+recipenames = set()
+# map of recipe -> data
+allrecipes = {}
+
+def make_bpn(recipe):
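+    # Strip native/cross/SDK affixes to map a recipe name to its base name,
+    # e.g. "quilt-native" -> "quilt", "nativesdk-python3" -> "python3".
+    # Longer suffixes are listed before "-cross" so they are not mangled
+    # by the shorter replacement.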
+ prefixes = ("nativesdk-",)
+    suffixes = ("-native", "-cross-canadian", "-crosssdk", "-cross", "-initial", "-intermediate")
+ for ix in prefixes + suffixes:
+ if ix in recipe:
+ recipe = recipe.replace(ix, "")
+ return recipe
+
+def gather_recipes(rows):
+ for row in rows:
+ recipe = row[0]
+ bpn = make_bpn(recipe)
+ if bpn not in recipenames:
+ recipenames.add(bpn)
+ if recipe not in allrecipes:
+ allrecipes[recipe] = row
+
+def generate_recipe_list():
+ # machine list
+ machine_list = ( "qemuarm64", "qemuarm", "qemumips64", "qemumips", "qemuppc", "qemux86-64", "qemux86" )
+ # set filename format
+ fnformat = 'distrodata.%s.csv'
+
+ # store all data files in distrodata
+ datadir = 'distrodata'
+
+    # create the directory if it does not exist
+ if not os.path.exists(datadir):
+ os.mkdir(datadir)
+
+    # run 'bitbake -c distrodata' for each machine
+ for machine in machine_list:
+ os.system('MACHINE='+ machine + ' bitbake -k universe -c distrodata')
+ shutil.copy('tmp/log/distrodata.csv', 'distrodata/' + fnformat % machine)
+
+ for machine in machine_list:
+ with open('distrodata/' + fnformat % machine) as f:
+ reader = csv.reader(f)
+ rows = reader.__iter__()
+ gather_recipes(rows)
+
+ with open('recipe-list.txt', 'w') as f:
+ for recipe in sorted(recipenames):
+ f.write("%s\n" % recipe)
+    print("file recipe-list.txt created with %d entries." % len(recipenames))
+
+ with open('all-recipe-list.txt', 'w') as f:
+ for recipe, row in sorted(allrecipes.items()):
+ f.write("%s\n" % ','.join(row))
+
+
+def diff_for_new_recipes(recipe1, recipe2):
+ prev_recipe_path = recipe1 + '/'
+ curr_recipe_path = recipe2 + '/'
+ if not os.path.isfile(prev_recipe_path + 'recipe-list.txt') or not os.path.isfile(curr_recipe_path + 'recipe-list.txt'):
+        print("recipe-list.txt files do not exist. Please verify that both files exist.")
+ exit(1)
+
+ prev = []
+ new = []
+
+ with open(prev_recipe_path + 'recipe-list.txt') as f:
+ prev = f.readlines()
+
+ with open(curr_recipe_path + 'recipe-list.txt') as f:
+ new = f.readlines()
+
+ updates = []
+ for pn in new:
+        if pn not in prev:
+ updates.append(pn.rstrip())
+
+ allrecipe = []
+ with open(recipe1 + '_' + recipe2 + '_new_recipe_list.txt','w') as dr:
+ with open(curr_recipe_path + 'all-recipe-list.txt') as f:
+ reader = csv.reader(f, delimiter=',')
+ for row in reader:
+ if row[0] in updates:
+ dr.write("%s,%s,%s" % (row[0], row[3], row[5]))
+ if len(row[9:]) > 0:
+ dr.write(",%s" % ','.join(row[9:]))
+ dr.write("\n")
+
+def main(argv):
+ if argv[0] == "generate_recipe_list":
+ generate_recipe_list()
+ elif argv[0] == "compare_recipe":
+ diff_for_new_recipes(argv[1], argv[2])
+ else:
+ print("no such option. choose either 'generate_recipe_list' or 'compare_recipe'")
+
+ exit(0)
+
+if __name__ == "__main__":
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except Exception as e:
+ print("Exception :", e)
+ sys.exit(1)
+
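Typical usage as implied by main() (directory names illustrative; compare_recipe
expects two directories, each containing the recipe-list.txt and
all-recipe-list.txt files produced by an earlier generate_recipe_list run):

    ./build-recipe-list.py generate_recipe_list
    ./build-recipe-list.py compare_recipe morty pyro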
diff --git a/poky/scripts/distro/distrocompare.sh b/poky/scripts/distro/distrocompare.sh
new file mode 100755
index 000000000..908760c23
--- /dev/null
+++ b/poky/scripts/distro/distrocompare.sh
@@ -0,0 +1,123 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2017, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# distrocompare.sh : provides the capability to get a list of new packages
+#                    between two distinct branches. This script takes two
+#                    parameters, each either a commit-ish or a branch name.
+#
+# To run : distrocompare.sh <older hash> <newer hash>
+# E.g. distrocompare.sh morty 92aa0e7
+# E.g. distrocompare.sh morty pyro
+#
+
+# get input as version
+previous_version=$1
+current_version=$2
+
+# set previous and current version
+if [ -z "$2" ]; then
+ previous_version=$1
+ current_version="current"
+fi
+
+# get the script location; the source tree is assumed to be two levels up
+scriptdir="$( realpath $(dirname "${BASH_SOURCE[0]}" ))"
+sourcedir="$( realpath $scriptdir/../.. )"
+
+# create working directory
+workdir=$(mktemp -d)
+
+# remember the current branch so we can roll back to it later
+branch=`cd $sourcedir; git branch | grep \* | cut -d ' ' -f2`
+
+# set current workdir to store final result
+currentworkdir=`pwd`
+
+# copy the script into workdir so it persists across branch checkouts
+cp $scriptdir/build-recipe-list.py $workdir
+
+#==================================================================
+
+function bake_distrodata {
+ # get to source directory of the git
+ cd $sourcedir
+
+ # change the branch / commit. Do not change if input is current
+ if [ "$1" != "current" ]; then
+ output=$(git checkout $1 2>&1)
+
+ # exit if git fails
+ if [[ $output == *"error"* ]]; then
+ echo "git error : $output"
+ echo "exiting ... "
+ rm -rf $workdir
+ exit
+ fi
+ fi
+
+ # make tmp as workdir
+ cd $workdir
+
+ # source oe-init to generate a new build folder
+ source $sourcedir/oe-init-build-env $1
+
+ # if file already exists with distrodata, do not append
+ if ! grep -q "distrodata" "conf/local.conf"; then
+ # add inherit distrodata to local.conf to enable distrodata feature
+ echo 'INHERIT += "distrodata"' >> conf/local.conf
+ fi
+
+ # use from tmp
+ $workdir/build-recipe-list.py generate_recipe_list
+}
+
+bake_distrodata $previous_version
+bake_distrodata $current_version
+
+#==================================================================
+
+cd $workdir
+
+# compare the 2 generated recipe-list.txt
+$workdir/build-recipe-list.py compare_recipe $previous_version $current_version
+
+# copy final result to current working directory
+cp $workdir/*_new_recipe_list.txt $currentworkdir
+
+if [ $? -ne 0 ]; then
+ rm -rf $workdir/$previous_version
+ rm -rf $workdir/$current_version
+ rm $workdir/build-recipe-list.py
+    # preserve the results in the temp dir if copying them over failed
+ exit
+fi
+
+# cleanup
+rm -rf $workdir
+
+# roll back to the original branch
+cd $sourcedir
+currentbranch=`git branch | grep \* | cut -d ' ' -f2`
+if [ "$currentbranch" != "$branch" ]; then
+ git checkout $branch
+fi
+
+cd $currentworkdir
+
+#==================================================================
diff --git a/poky/scripts/gen-lockedsig-cache b/poky/scripts/gen-lockedsig-cache
new file mode 100755
index 000000000..6765891d1
--- /dev/null
+++ b/poky/scripts/gen-lockedsig-cache
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import glob
+import shutil
+import errno
+
+def mkdir(d):
+ try:
+ os.makedirs(d)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+            raise
+
+if len(sys.argv) < 5:
+ print("Incorrect number of arguments specified")
+ print("syntax: gen-lockedsig-cache <locked-sigs.inc> <input-cachedir> <output-cachedir> <nativelsbstring> [filterfile]")
+ sys.exit(1)
+
+filterlist = []
+if len(sys.argv) > 5:
+ print('Reading filter file %s' % sys.argv[5])
+ with open(sys.argv[5]) as f:
+ for l in f.readlines():
+ if ":" in l:
+ filterlist.append(l.rstrip())
+
+print('Reading %s' % sys.argv[1])
+sigs = []
+with open(sys.argv[1]) as f:
+ for l in f.readlines():
+ if ":" in l:
+ task, sig = l.split()[0].rsplit(':', 1)
+            if filterlist and task not in filterlist:
+ print('Filtering out %s' % task)
+ else:
+ sigs.append(sig)
+
+print('Gathering file list')
+files = set()
+for s in sigs:
+ p = sys.argv[2] + "/" + s[:2] + "/*" + s + "*"
+ files |= set(glob.glob(p))
+ p = sys.argv[2] + "/%s/" % sys.argv[4] + s[:2] + "/*" + s + "*"
+ files |= set(glob.glob(p))
+
+print('Processing files')
+for f in files:
+ sys.stdout.write('Processing %s... ' % f)
+ _, ext = os.path.splitext(f)
+    if ext not in ('.tgz', '.siginfo', '.sig'):
+ # Most likely a temp file, skip it
+ print('skipping')
+ continue
+ dst = os.path.join(sys.argv[3], os.path.relpath(f, sys.argv[2]))
+ destdir = os.path.dirname(dst)
+ mkdir(destdir)
+
+ src = os.path.realpath(f)
+ if os.path.exists(dst):
+ os.remove(dst)
+    if os.stat(src).st_dev == os.stat(destdir).st_dev:
+ print('linking')
+ try:
+ os.link(src, dst)
+ except OSError as e:
+ print('hard linking failed, copying')
+ shutil.copyfile(src, dst)
+ else:
+ print('copying')
+ shutil.copyfile(src, dst)
+
+print('Done!')
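
An invocation sketch matching the syntax line above (all paths and the
nativelsbstring value are illustrative; the filter file is optional):

    gen-lockedsig-cache locked-sigs.inc sstate-cache out-cache universal filter.txt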
diff --git a/poky/scripts/gen-site-config b/poky/scripts/gen-site-config
new file mode 100755
index 000000000..7da7a0bd8
--- /dev/null
+++ b/poky/scripts/gen-site-config
@@ -0,0 +1,53 @@
+#! /bin/sh
+# Copyright (c) 2005-2008 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+cat << EOF
+AC_PREREQ(2.57)
+AC_INIT([site_wide],[1.0.0])
+
+EOF
+
+# Disable as endian is set in the default config
+#echo AC_C_BIGENDIAN
+#echo
+
+if [ -e $1/types ] ; then
+ while read type ; do
+ echo "AC_CHECK_SIZEOF([$type])"
+ done < $1/types
+
+ echo
+fi
+
+if [ -e $1/funcs ]; then
+ while read func ; do
+ echo "AC_CHECK_FUNCS([$func])"
+ done < $1/funcs
+
+ echo
+fi
+
+if [ -e $1/headers ]; then
+ while read header ; do
+ echo "AC_CHECK_HEADERS([$header])"
+ done < $1/headers
+
+ echo
+fi
+
+cat << EOF
+AC_OUTPUT
+EOF
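
A usage sketch (directory name and entry illustrative): the single argument is a
directory that may contain files named types, funcs and headers, one entry per
line; the generated configure.ac fragment is written to stdout:

    mkdir sitefiles && echo 'long long' > sitefiles/types
    gen-site-config sitefiles > configure.ac   # emits AC_CHECK_SIZEOF([long long])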
diff --git a/poky/scripts/lib/argparse_oe.py b/poky/scripts/lib/argparse_oe.py
new file mode 100644
index 000000000..9bdfc1cec
--- /dev/null
+++ b/poky/scripts/lib/argparse_oe.py
@@ -0,0 +1,176 @@
+import sys
+import argparse
+from collections import defaultdict, OrderedDict
+
+class ArgumentUsageError(Exception):
+ """Exception class you can raise (and catch) in order to show the help"""
+ def __init__(self, message, subcommand=None):
+ self.message = message
+ self.subcommand = subcommand
+
+class ArgumentParser(argparse.ArgumentParser):
+ """Our own version of argparse's ArgumentParser"""
+ def __init__(self, *args, **kwargs):
+ kwargs.setdefault('formatter_class', OeHelpFormatter)
+ self._subparser_groups = OrderedDict()
+ super(ArgumentParser, self).__init__(*args, **kwargs)
+ self._positionals.title = 'arguments'
+ self._optionals.title = 'options'
+
+ def error(self, message):
+ """error(message: string)
+
+ Prints a help message incorporating the message to stderr and
+ exits.
+ """
+ self._print_message('%s: error: %s\n' % (self.prog, message), sys.stderr)
+ self.print_help(sys.stderr)
+ sys.exit(2)
+
+ def error_subcommand(self, message, subcommand):
+ if subcommand:
+ action = self._get_subparser_action()
+ try:
+ subparser = action._name_parser_map[subcommand]
+ except KeyError:
+ self.error('no subparser for name "%s"' % subcommand)
+ else:
+ subparser.error(message)
+
+ self.error(message)
+
+ def add_subparsers(self, *args, **kwargs):
+ if 'dest' not in kwargs:
+ kwargs['dest'] = '_subparser_name'
+
+ ret = super(ArgumentParser, self).add_subparsers(*args, **kwargs)
+ # Need a way of accessing the parent parser
+ ret._parent_parser = self
+ # Ensure our class gets instantiated
+ ret._parser_class = ArgumentSubParser
+ # Hacky way of adding a method to the subparsers object
+ ret.add_subparser_group = self.add_subparser_group
+ return ret
+
+ def add_subparser_group(self, groupname, groupdesc, order=0):
+ self._subparser_groups[groupname] = (groupdesc, order)
+
+ def parse_args(self, args=None, namespace=None):
+ """Parse arguments, using the correct subparser to show the error."""
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ message = 'unrecognized arguments: %s' % ' '.join(argv)
+ if self._subparsers:
+ subparser = self._get_subparser(args)
+ subparser.error(message)
+ else:
+ self.error(message)
+ sys.exit(2)
+ return args
+
+ def _get_subparser(self, args):
+ action = self._get_subparser_action()
+ if action.dest == argparse.SUPPRESS:
+ self.error('cannot get subparser, the subparser action dest is suppressed')
+
+ name = getattr(args, action.dest)
+ try:
+ return action._name_parser_map[name]
+ except KeyError:
+ self.error('no subparser for name "%s"' % name)
+
+ def _get_subparser_action(self):
+ if not self._subparsers:
+ self.error('cannot return the subparser action, no subparsers added')
+
+ for action in self._subparsers._group_actions:
+ if isinstance(action, argparse._SubParsersAction):
+ return action
+
+
+class ArgumentSubParser(ArgumentParser):
+ def __init__(self, *args, **kwargs):
+ if 'group' in kwargs:
+ self._group = kwargs.pop('group')
+ if 'order' in kwargs:
+ self._order = kwargs.pop('order')
+ super(ArgumentSubParser, self).__init__(*args, **kwargs)
+
+ def parse_known_args(self, args=None, namespace=None):
+ # This works around argparse not handling optional positional arguments being
+ # intermixed with other options. A pretty horrible hack, but we're not left
+ # with much choice given that the bug in argparse exists and it's difficult
+ # to subclass.
+ # Borrowed from http://stackoverflow.com/questions/20165843/argparse-how-to-handle-variable-number-of-arguments-nargs
+ # with an extra workaround (in format_help() below) for the positional
+ # arguments disappearing from the --help output, as well as structural tweaks.
+ # Originally simplified from http://bugs.python.org/file30204/test_intermixed.py
+ positionals = self._get_positional_actions()
+ for action in positionals:
+ # deactivate positionals
+ action.save_nargs = action.nargs
+ action.nargs = 0
+
+ namespace, remaining_args = super(ArgumentSubParser, self).parse_known_args(args, namespace)
+ for action in positionals:
+ # remove the empty positional values from namespace
+ if hasattr(namespace, action.dest):
+ delattr(namespace, action.dest)
+ for action in positionals:
+ action.nargs = action.save_nargs
+ # parse positionals
+ namespace, extras = super(ArgumentSubParser, self).parse_known_args(remaining_args, namespace)
+ return namespace, extras
+
+ def format_help(self):
+ # Quick, restore the positionals!
+ positionals = self._get_positional_actions()
+ for action in positionals:
+ if hasattr(action, 'save_nargs'):
+ action.nargs = action.save_nargs
+ return super(ArgumentParser, self).format_help()
+
+
+class OeHelpFormatter(argparse.HelpFormatter):
+ def _format_action(self, action):
+ if hasattr(action, '_get_subactions'):
+ # subcommands list
+ groupmap = defaultdict(list)
+ ordermap = {}
+ subparser_groups = action._parent_parser._subparser_groups
+ groups = sorted(subparser_groups.keys(), key=lambda item: subparser_groups[item][1], reverse=True)
+ for subaction in self._iter_indented_subactions(action):
+ parser = action._name_parser_map[subaction.dest]
+ group = getattr(parser, '_group', None)
+ groupmap[group].append(subaction)
+ if group not in groups:
+ groups.append(group)
+ order = getattr(parser, '_order', 0)
+ ordermap[subaction.dest] = order
+
+ lines = []
+ if len(groupmap) > 1:
+ groupindent = ' '
+ else:
+ groupindent = ''
+ for group in groups:
+ subactions = groupmap[group]
+ if not subactions:
+ continue
+ if groupindent:
+ if not group:
+ group = 'other'
+ groupdesc = subparser_groups.get(group, (group, 0))[0]
+ lines.append(' %s:' % groupdesc)
+ for subaction in sorted(subactions, key=lambda item: ordermap[item.dest], reverse=True):
+ lines.append('%s%s' % (groupindent, self._format_action(subaction).rstrip()))
+ return '\n'.join(lines)
+ else:
+ return super(OeHelpFormatter, self)._format_action(action)
+
+def int_positive(value):
+ ivalue = int(value)
+ if ivalue <= 0:
+ raise argparse.ArgumentTypeError(
+ "%s is not a positive int value" % value)
+ return ivalue
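
A minimal usage sketch (tool and command names hypothetical), mirroring how the
devtool script above wires up grouped subcommands:

    from argparse_oe import ArgumentParser, int_positive

    parser = ArgumentParser(description='mytool')
    subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
    subparsers.required = True
    subparsers.add_subparser_group('info', 'Getting information')
    # 'group' and 'order' are consumed by ArgumentSubParser.__init__()
    parser_status = subparsers.add_parser('status', help='Show status',
                                          group='info', order=10)
    parser_status.add_argument('count', type=int_positive)
    args = parser.parse_args()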
diff --git a/poky/scripts/lib/build_perf/__init__.py b/poky/scripts/lib/build_perf/__init__.py
new file mode 100644
index 000000000..1f8b72907
--- /dev/null
+++ b/poky/scripts/lib/build_perf/__init__.py
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Build performance test library functions"""
+
+def print_table(rows, row_fmt=None):
+ """Print data table"""
+ if not rows:
+ return
+ if not row_fmt:
+ row_fmt = ['{:{wid}} '] * len(rows[0])
+
+ # Go through the data to get maximum cell widths
+ num_cols = len(row_fmt)
+ col_widths = [0] * num_cols
+ for row in rows:
+ for i, val in enumerate(row):
+ col_widths[i] = max(col_widths[i], len(str(val)))
+
+ for row in rows:
+ print(*[row_fmt[i].format(col, wid=col_widths[i]) for i, col in enumerate(row)])
+
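For example (rows illustrative), each column is padded to the width of its
widest cell:

    print_table([('task', 'cputime'),
                 ('do_compile', '123.4'),
                 ('do_configure', '12.3')])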
diff --git a/poky/scripts/lib/build_perf/html.py b/poky/scripts/lib/build_perf/html.py
new file mode 100644
index 000000000..578bb162e
--- /dev/null
+++ b/poky/scripts/lib/build_perf/html.py
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Helper module for HTML reporting"""
+from jinja2 import Environment, PackageLoader
+
+
+env = Environment(loader=PackageLoader('build_perf', 'html'))
+
+template = env.get_template('report.html')
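
A rendering sketch; the context variable names (title, metadata, test_data) are
the ones consumed by report.html below, and the data structures themselves are
assumed to have been prepared elsewhere:

    from build_perf.html import template

    html = template.render(title='Build Perf Test Report',
                           metadata=metadata, test_data=test_data)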
diff --git a/poky/scripts/lib/build_perf/html/measurement_chart.html b/poky/scripts/lib/build_perf/html/measurement_chart.html
new file mode 100644
index 000000000..65f1a227a
--- /dev/null
+++ b/poky/scripts/lib/build_perf/html/measurement_chart.html
@@ -0,0 +1,50 @@
+<script type="text/javascript">
+ chartsDrawing += 1;
+ google.charts.setOnLoadCallback(drawChart_{{ chart_elem_id }});
+ function drawChart_{{ chart_elem_id }}() {
+ var data = new google.visualization.DataTable();
+
+ // Chart options
+ var options = {
+ theme : 'material',
+ legend: 'none',
+ hAxis: { format: '', title: 'Commit number',
+ minValue: {{ chart_opts.haxis.min }},
+ maxValue: {{ chart_opts.haxis.max }} },
+ {% if measurement.type == 'time' %}
+ vAxis: { format: 'h:mm:ss' },
+ {% else %}
+ vAxis: { format: '' },
+ {% endif %}
+ pointSize: 5,
+ chartArea: { left: 80, right: 15 },
+ };
+
+ // Define data columns
+ data.addColumn('number', 'Commit');
+ data.addColumn('{{ measurement.value_type.gv_data_type }}',
+ '{{ measurement.value_type.quantity }}');
+ // Add data rows
+ data.addRows([
+ {% for sample in measurement.samples %}
+ [{{ sample.commit_num }}, {{ sample.mean.gv_value() }}],
+ {% endfor %}
+ ]);
+
+ // Finally, draw the chart
+ chart_div = document.getElementById('{{ chart_elem_id }}');
+ var chart = new google.visualization.LineChart(chart_div);
+ google.visualization.events.addListener(chart, 'ready', function () {
+ //chart_div = document.getElementById('{{ chart_elem_id }}');
+ //chart_div.innerHTML = '<img src="' + chart.getImageURI() + '">';
+ png_div = document.getElementById('{{ chart_elem_id }}_png');
+ png_div.outerHTML = '<a id="{{ chart_elem_id }}_png" href="' + chart.getImageURI() + '">PNG</a>';
+ console.log("CHART READY: {{ chart_elem_id }}");
+ chartsDrawing -= 1;
+ if (chartsDrawing == 0)
+ console.log("ALL CHARTS READY");
+ });
+ chart.draw(data, options);
+}
+</script>
+
diff --git a/poky/scripts/lib/build_perf/html/report.html b/poky/scripts/lib/build_perf/html/report.html
new file mode 100644
index 000000000..291ad9d72
--- /dev/null
+++ b/poky/scripts/lib/build_perf/html/report.html
@@ -0,0 +1,286 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+{# Scripts, for visualization#}
+<!--START-OF-SCRIPTS-->
+<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+<script type="text/javascript">
+google.charts.load('current', {'packages':['corechart']});
+var chartsDrawing = 0;
+</script>
+
+{# Render measurement result charts #}
+{% for test in test_data %}
+ {% if test.status == 'SUCCESS' %}
+ {% for measurement in test.measurements %}
+ {% set chart_elem_id = test.name + '_' + measurement.name + '_chart' %}
+ {% include 'measurement_chart.html' %}
+ {% endfor %}
+ {% endif %}
+{% endfor %}
+
+<!--END-OF-SCRIPTS-->
+
+{# Styles #}
+<style>
+.meta-table {
+ font-size: 14px;
+ text-align: left;
+ border-collapse: collapse;
+}
+.meta-table tr:nth-child(even){background-color: #f2f2f2}
+.meta-table th, .meta-table td {
+ padding: 4px;
+}
+.summary {
+ margin: 0;
+ font-size: 14px;
+ text-align: left;
+ border-collapse: collapse;
+}
+.summary th, .summary td {
+ padding: 4px;
+}
+.measurement {
+ padding: 8px 0px 8px 8px;
+ border: 2px solid #f0f0f0;
+ margin-bottom: 10px;
+}
+.details {
+ margin: 0;
+ font-size: 12px;
+ text-align: left;
+ border-collapse: collapse;
+}
+.details th {
+ padding-right: 8px;
+}
+.details.plain th {
+ font-weight: normal;
+}
+.preformatted {
+ font-family: monospace;
+ white-space: pre-wrap;
+ background-color: #f0f0f0;
+ margin-left: 10px;
+}
+hr {
+ color: #f0f0f0;
+}
+h2 {
+ font-size: 20px;
+ margin-bottom: 0px;
+ color: #707070;
+}
+h3 {
+ font-size: 16px;
+ margin: 0px;
+ color: #707070;
+}
+</style>
+
+<title>{{ title }}</title>
+</head>
+
+{% macro poky_link(commit) -%}
+ <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?id={{ commit }}">{{ commit[0:11] }}</a>
+{%- endmacro %}
+
+<body><div style="width: 700px">
+ {# Test metadata #}
+ <h2>General</h2>
+ <hr>
+ <table class="meta-table" style="width: 100%">
+ <tr>
+ <th></th>
+ <th>Current commit</th>
+ <th>Comparing with</th>
+ </tr>
+ {% for key, item in metadata.items() %}
+ <tr>
+ <th>{{ item.title }}</th>
+ {%if key == 'commit' %}
+ <td>{{ poky_link(item.value) }}</td>
+ <td>{{ poky_link(item.value_old) }}</td>
+ {% else %}
+ <td>{{ item.value }}</td>
+ <td>{{ item.value_old }}</td>
+ {% endif %}
+ </tr>
+ {% endfor %}
+ </table>
+
+ {# Test result summary #}
+ <h2>Test result summary</h2>
+ <hr>
+ <table class="summary" style="width: 100%">
+ {% for test in test_data %}
+ {% if loop.index is even %}
+ {% set row_style = 'style="background-color: #f2f2f2"' %}
+ {% else %}
+ {% set row_style = 'style="background-color: #ffffff"' %}
+ {% endif %}
+ {% if test.status == 'SUCCESS' %}
+ {% for measurement in test.measurements %}
+ <tr {{ row_style }}>
+ {% if loop.index == 1 %}
+ <td>{{ test.name }}: {{ test.description }}</td>
+ {% else %}
+ {# add empty cell in place of the test name#}
+ <td></td>
+ {% endif %}
+ {% if measurement.absdiff > 0 %}
+ {% set result_style = "color: red" %}
+ {% elif measurement.absdiff == measurement.absdiff %}
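+          {# x == x is false only for NaN, so this branch means a valid non-positive diff #}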
+ {% set result_style = "color: green" %}
+ {% else %}
+ {% set result_style = "color: orange" %}
+ {%endif %}
+ <td>{{ measurement.description }}</td>
+ <td style="font-weight: bold">{{ measurement.value.mean }}</td>
+ <td style="{{ result_style }}">{{ measurement.absdiff_str }}</td>
+ <td style="{{ result_style }}">{{ measurement.reldiff }}</td>
+ </tr>
+ {% endfor %}
+ {% else %}
+ <td style="font-weight: bold; color: red;">{{test.status }}</td>
+ <td></td> <td></td> <td></td> <td></td>
+ {% endif %}
+ {% endfor %}
+ </table>
+
+ {# Detailed test results #}
+ {% for test in test_data %}
+ <h2>{{ test.name }}: {{ test.description }}</h2>
+ <hr>
+ {% if test.status == 'SUCCESS' %}
+ {% for measurement in test.measurements %}
+ <div class="measurement">
+ <h3>{{ measurement.description }}</h3>
+ <div style="font-weight:bold;">
+ <span style="font-size: 23px;">{{ measurement.value.mean }}</span>
+ <span style="font-size: 20px; margin-left: 12px">
+ {% if measurement.absdiff > 0 %}
+ <span style="color: red">
+ {% elif measurement.absdiff == measurement.absdiff %}
+ <span style="color: green">
+ {% else %}
+ <span style="color: orange">
+ {% endif %}
+ {{ measurement.absdiff_str }} ({{measurement.reldiff}})
+ </span></span>
+ </div>
+ {# Table for trendchart and the statistics #}
+ <table style="width: 100%">
+ <tr>
+ <td style="width: 75%">
+ {# Linechart #}
+ <div id="{{ test.name }}_{{ measurement.name }}_chart"></div>
+ </td>
+ <td>
+ {# Measurement statistics #}
+ <table class="details plain">
+ <tr>
+ <th>Test runs</th><td>{{ measurement.value.sample_cnt }}</td>
+ </tr><tr>
+ <th>-/+</th><td>-{{ measurement.value.minus }} / +{{ measurement.value.plus }}</td>
+ </tr><tr>
+ <th>Min</th><td>{{ measurement.value.min }}</td>
+ </tr><tr>
+ <th>Max</th><td>{{ measurement.value.max }}</td>
+ </tr><tr>
+ <th>Stdev</th><td>{{ measurement.value.stdev }}</td>
+ </tr><tr>
+ <th><div id="{{ test.name }}_{{ measurement.name }}_chart_png"></div></th>
+ <td></td>
+ </tr>
+ </table>
+ </td>
+ </tr>
+ </table>
+
+ {# Task and recipe summary from buildstats #}
+ {% if 'buildstats' in measurement %}
+ Task resource usage
+ <table class="details" style="width:100%">
+ <tr>
+ <th>Number of tasks</th>
+ <th>Top consumers of cputime</th>
+ </tr>
+ <tr>
+ <td style="vertical-align: top">{{ measurement.buildstats.tasks.count }} ({{ measurement.buildstats.tasks.change }})</td>
+ {# Table of most resource-hungry tasks #}
+ <td>
+ <table class="details plain">
+ {% for diff in measurement.buildstats.top_consumer|reverse %}
+ <tr>
+ <th>{{ diff.pkg }}.{{ diff.task }}</th>
+ <td>{{ '%0.0f' % diff.value2 }} s</td>
+ </tr>
+ {% endfor %}
+ </table>
+ </td>
+ </tr>
+ <tr>
+ <th>Biggest increase in cputime</th>
+ <th>Biggest decrease in cputime</th>
+ </tr>
+ <tr>
+ {# Table biggest increase in resource usage #}
+ <td>
+ <table class="details plain">
+ {% for diff in measurement.buildstats.top_increase|reverse %}
+ <tr>
+ <th>{{ diff.pkg }}.{{ diff.task }}</th>
+ <td>{{ '%+0.0f' % diff.absdiff }} s</td>
+ </tr>
+ {% endfor %}
+ </table>
+ </td>
+ {# Table biggest decrease in resource usage #}
+ <td>
+ <table class="details plain">
+ {% for diff in measurement.buildstats.top_decrease %}
+ <tr>
+ <th>{{ diff.pkg }}.{{ diff.task }}</th>
+ <td>{{ '%+0.0f' % diff.absdiff }} s</td>
+ </tr>
+ {% endfor %}
+ </table>
+ </td>
+ </tr>
+ </table>
+
+ {# Recipe version differences #}
+ {% if measurement.buildstats.ver_diff %}
+ <div style="margin-top: 16px">Recipe version changes</div>
+ <table class="details">
+ {% for head, recipes in measurement.buildstats.ver_diff.items() %}
+ <tr>
+ <th colspan="2">{{ head }}</th>
+ </tr>
+ {% for name, info in recipes|sort %}
+ <tr>
+ <td>{{ name }}</td>
+ <td>{{ info }}</td>
+ </tr>
+ {% endfor %}
+ {% endfor %}
+ </table>
+ {% else %}
+ <div style="margin-top: 16px">No recipe version changes detected</div>
+ {% endif %}
+ {% endif %}
+ </div>
+ {% endfor %}
+ {# Unsuccessful test #}
+ {% else %}
+ <span style="font-size: 150%; font-weight: bold; color: red;">{{ test.status }}
+ {% if test.err_type %}<span style="font-size: 75%; font-weight: normal">({{ test.err_type }})</span>{% endif %}
+ </span>
+ <div class="preformatted">{{ test.message }}</div>
+ {% endif %}
+ {% endfor %}
+</div></body>
+</html>
+
diff --git a/poky/scripts/lib/build_perf/report.py b/poky/scripts/lib/build_perf/report.py
new file mode 100644
index 000000000..d99a36797
--- /dev/null
+++ b/poky/scripts/lib/build_perf/report.py
@@ -0,0 +1,345 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Handling of build perf test reports"""
+from collections import OrderedDict, Mapping, namedtuple
+from datetime import datetime, timezone
+from numbers import Number
+from statistics import mean, stdev, variance
+
+
+AggregateTestData = namedtuple('AggregateTestData', ['metadata', 'results'])
+
+
+def isofmt_to_timestamp(string):
+ """Convert timestamp string in ISO 8601 format into unix timestamp"""
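+    # The timestamp is assumed to be UTC, e.g. "2017-09-01T07:51:10.5"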
+ if '.' in string:
+ dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S.%f')
+ else:
+ dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
+ return dt.replace(tzinfo=timezone.utc).timestamp()
+
+
+def metadata_xml_to_json(elem):
+ """Convert metadata xml into JSON format"""
+ assert elem.tag == 'metadata', "Invalid metadata file format"
+
+ def _xml_to_json(elem):
+ """Convert xml element to JSON object"""
+ out = OrderedDict()
+ for child in elem.getchildren():
+ key = child.attrib.get('name', child.tag)
+ if len(child):
+ out[key] = _xml_to_json(child)
+ else:
+ out[key] = child.text
+ return out
+ return _xml_to_json(elem)
+
+
+def results_xml_to_json(elem):
+ """Convert results xml into JSON format"""
+ rusage_fields = ('ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
+ 'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
+ 'ru_nivcsw')
+ iostat_fields = ('rchar', 'wchar', 'syscr', 'syscw', 'read_bytes',
+ 'write_bytes', 'cancelled_write_bytes')
+
+ def _read_measurement(elem):
+ """Convert measurement to JSON"""
+ data = OrderedDict()
+ data['type'] = elem.tag
+ data['name'] = elem.attrib['name']
+ data['legend'] = elem.attrib['legend']
+ values = OrderedDict()
+
+ # SYSRES measurement
+ if elem.tag == 'sysres':
+ for subel in elem:
+ if subel.tag == 'time':
+ values['start_time'] = isofmt_to_timestamp(subel.attrib['timestamp'])
+ values['elapsed_time'] = float(subel.text)
+ elif subel.tag == 'rusage':
+ rusage = OrderedDict()
+ for field in rusage_fields:
+ if 'time' in field:
+ rusage[field] = float(subel.attrib[field])
+ else:
+ rusage[field] = int(subel.attrib[field])
+ values['rusage'] = rusage
+ elif subel.tag == 'iostat':
+ values['iostat'] = OrderedDict([(f, int(subel.attrib[f]))
+ for f in iostat_fields])
+ elif subel.tag == 'buildstats_file':
+ values['buildstats_file'] = subel.text
+ else:
+ raise TypeError("Unknown sysres value element '{}'".format(subel.tag))
+ # DISKUSAGE measurement
+ elif elem.tag == 'diskusage':
+ values['size'] = int(elem.find('size').text)
+ else:
+ raise Exception("Unknown measurement tag '{}'".format(elem.tag))
+ data['values'] = values
+ return data
+
+ def _read_testcase(elem):
+ """Convert testcase into JSON"""
+ assert elem.tag == 'testcase', "Expecting 'testcase' element instead of {}".format(elem.tag)
+
+ data = OrderedDict()
+ data['name'] = elem.attrib['name']
+ data['description'] = elem.attrib['description']
+ data['status'] = 'SUCCESS'
+ data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
+ data['elapsed_time'] = float(elem.attrib['time'])
+ measurements = OrderedDict()
+
+ for subel in elem.getchildren():
+ if subel.tag == 'error' or subel.tag == 'failure':
+ data['status'] = subel.tag.upper()
+ data['message'] = subel.attrib['message']
+ data['err_type'] = subel.attrib['type']
+ data['err_output'] = subel.text
+ elif subel.tag == 'skipped':
+ data['status'] = 'SKIPPED'
+ data['message'] = subel.text
+ else:
+ measurements[subel.attrib['name']] = _read_measurement(subel)
+ data['measurements'] = measurements
+ return data
+
+ def _read_testsuite(elem):
+ """Convert suite to JSON"""
+ assert elem.tag == 'testsuite', \
+ "Expecting 'testsuite' element instead of {}".format(elem.tag)
+
+ data = OrderedDict()
+ if 'hostname' in elem.attrib:
+ data['tester_host'] = elem.attrib['hostname']
+ data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
+ data['elapsed_time'] = float(elem.attrib['time'])
+ tests = OrderedDict()
+
+ for case in elem.getchildren():
+ tests[case.attrib['name']] = _read_testcase(case)
+ data['tests'] = tests
+ return data
+
+ # Main function
+ assert elem.tag == 'testsuites', "Invalid test report format"
+ assert len(elem) == 1, "Too many testsuites"
+
+ return _read_testsuite(elem.getchildren()[0])
+
+
+def aggregate_metadata(metadata):
+ """Aggregate metadata into one, basically a sanity check"""
+ mutable_keys = ('pretty_name', 'version_id')
+
+ def aggregate_obj(aggregate, obj, assert_str=True):
+ """Aggregate objects together"""
+ assert type(aggregate) is type(obj), \
+ "Type mismatch: {} != {}".format(type(aggregate), type(obj))
+ if isinstance(obj, Mapping):
+ assert set(aggregate.keys()) == set(obj.keys())
+ for key, val in obj.items():
+ aggregate_obj(aggregate[key], val, key not in mutable_keys)
+ elif isinstance(obj, list):
+ assert len(aggregate) == len(obj)
+ for i, val in enumerate(obj):
+ aggregate_obj(aggregate[i], val)
+        elif not isinstance(obj, str) or assert_str:
+ assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
+
+ if not metadata:
+ return {}
+
+ # Do the aggregation
+ aggregate = metadata[0].copy()
+ for testrun in metadata[1:]:
+ aggregate_obj(aggregate, testrun)
+ aggregate['testrun_count'] = len(metadata)
+ return aggregate
+
+
+def aggregate_data(data):
+ """Aggregate multiple test results JSON structures into one"""
+
+ mutable_keys = ('status', 'message', 'err_type', 'err_output')
+
+ class SampleList(list):
+ """Container for numerical samples"""
+ pass
+
+ def new_aggregate_obj(obj):
+ """Create new object for aggregate"""
+ if isinstance(obj, Number):
+ new_obj = SampleList()
+ new_obj.append(obj)
+ elif isinstance(obj, str):
+ new_obj = obj
+ else:
+            # Lists and dicts are kept as is
+ new_obj = obj.__class__()
+ aggregate_obj(new_obj, obj)
+ return new_obj
+
+ def aggregate_obj(aggregate, obj, assert_str=True):
+ """Recursive "aggregation" of JSON objects"""
+ if isinstance(obj, Number):
+ assert isinstance(aggregate, SampleList)
+ aggregate.append(obj)
+ return
+
+ assert type(aggregate) == type(obj), \
+ "Type mismatch: {} != {}".format(type(aggregate), type(obj))
+ if isinstance(obj, Mapping):
+ for key, val in obj.items():
+ if not key in aggregate:
+ aggregate[key] = new_aggregate_obj(val)
+ else:
+ aggregate_obj(aggregate[key], val, key not in mutable_keys)
+ elif isinstance(obj, list):
+ for i, val in enumerate(obj):
+ if i >= len(aggregate):
+                    aggregate.append(new_aggregate_obj(val))
+ else:
+ aggregate_obj(aggregate[i], val)
+ elif isinstance(obj, str):
+ # Sanity check for data
+ if assert_str:
+ assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
+ else:
+ raise Exception("BUG: unable to aggregate '{}' ({})".format(type(obj), str(obj)))
+
+ if not data:
+ return {}
+
+ # Do the aggregation
+ aggregate = data[0].__class__()
+ for testrun in data:
+ aggregate_obj(aggregate, testrun)
+ return aggregate
+
+
+class MeasurementVal(float):
+ """Base class representing measurement values"""
+ gv_data_type = 'number'
+
+ def gv_value(self):
+ """Value formatting for visualization"""
+ if self != self:
+ return "null"
+ else:
+ return self
+
+
+class TimeVal(MeasurementVal):
+ """Class representing time values"""
+ quantity = 'time'
+ gv_title = 'elapsed time'
+ gv_data_type = 'timeofday'
+
+ def hms(self):
+        """Split time into hours, minutes and seconds"""
+ hhh = int(abs(self) / 3600)
+ mmm = int((abs(self) % 3600) / 60)
+ sss = abs(self) % 60
+ return hhh, mmm, sss
+
+ def __str__(self):
+ if self != self:
+ return "nan"
+ hh, mm, ss = self.hms()
+ sign = '-' if self < 0 else ''
+ if hh > 0:
+ return '{}{:d}:{:02d}:{:02.0f}'.format(sign, hh, mm, ss)
+ elif mm > 0:
+ return '{}{:d}:{:04.1f}'.format(sign, mm, ss)
+ elif ss > 1:
+ return '{}{:.1f} s'.format(sign, ss)
+ else:
+ return '{}{:.2f} s'.format(sign, ss)
+
+ def gv_value(self):
+ """Value formatting for visualization"""
+ if self != self:
+ return "null"
+ hh, mm, ss = self.hms()
+ return [hh, mm, int(ss), int(ss*1000) % 1000]
+
+
+class SizeVal(MeasurementVal):
+    """Class representing size values"""
+ quantity = 'size'
+ gv_title = 'size in MiB'
+ gv_data_type = 'number'
+
+ def __str__(self):
+ if self != self:
+ return "nan"
+ if abs(self) < 1024:
+ return '{:.1f} kiB'.format(self)
+ elif abs(self) < 1048576:
+ return '{:.2f} MiB'.format(self / 1024)
+ else:
+ return '{:.2f} GiB'.format(self / 1048576)
+
+ def gv_value(self):
+ """Value formatting for visualization"""
+ if self != self:
+ return "null"
+ return self / 1024
+
+def measurement_stats(meas, prefix=''):
+ """Get statistics of a measurement"""
+ if not meas:
+ return {prefix + 'sample_cnt': 0,
+ prefix + 'mean': MeasurementVal('nan'),
+ prefix + 'stdev': MeasurementVal('nan'),
+ prefix + 'variance': MeasurementVal('nan'),
+ prefix + 'min': MeasurementVal('nan'),
+ prefix + 'max': MeasurementVal('nan'),
+ prefix + 'minus': MeasurementVal('nan'),
+ prefix + 'plus': MeasurementVal('nan')}
+
+ stats = {'name': meas['name']}
+ if meas['type'] == 'sysres':
+ val_cls = TimeVal
+ values = meas['values']['elapsed_time']
+ elif meas['type'] == 'diskusage':
+ val_cls = SizeVal
+ values = meas['values']['size']
+ else:
+ raise Exception("Unknown measurement type '{}'".format(meas['type']))
+ stats['val_cls'] = val_cls
+ stats['quantity'] = val_cls.quantity
+ stats[prefix + 'sample_cnt'] = len(values)
+
+ mean_val = val_cls(mean(values))
+ min_val = val_cls(min(values))
+ max_val = val_cls(max(values))
+
+ stats[prefix + 'mean'] = mean_val
+ if len(values) > 1:
+ stats[prefix + 'stdev'] = val_cls(stdev(values))
+ stats[prefix + 'variance'] = val_cls(variance(values))
+ else:
+ stats[prefix + 'stdev'] = float('nan')
+ stats[prefix + 'variance'] = float('nan')
+ stats[prefix + 'min'] = min_val
+ stats[prefix + 'max'] = max_val
+ stats[prefix + 'minus'] = val_cls(mean_val - min_val)
+ stats[prefix + 'plus'] = val_cls(max_val - mean_val)
+
+ return stats
+
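A sketch of calling measurement_stats() on aggregated data (the dict shape
follows _read_measurement() above, with elapsed_time aggregated into a list of
samples; values illustrative):

    meas = {'type': 'sysres', 'name': 'build',
            'values': {'elapsed_time': [100.0, 102.5, 98.7]}}
    stats = measurement_stats(meas)
    print(stats['mean'])   # a TimeVal; prints '1:40.4'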
diff --git a/poky/scripts/lib/build_perf/scrape-html-report.js b/poky/scripts/lib/build_perf/scrape-html-report.js
new file mode 100644
index 000000000..05a1f5700
--- /dev/null
+++ b/poky/scripts/lib/build_perf/scrape-html-report.js
@@ -0,0 +1,56 @@
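+// PhantomJS scraper: loads the rendered HTML report, waits for the chart
+// "ready" console messages, then saves the chart image data and final page.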
+var fs = require('fs');
+var system = require('system');
+var page = require('webpage').create();
+
+// Examine console log for message from chart drawing
+page.onConsoleMessage = function(msg) {
+ console.log(msg);
+ if (msg === "ALL CHARTS READY") {
+ window.charts_ready = true;
+ }
+ else if (msg.slice(0, 11) === "CHART READY") {
+ var chart_id = msg.split(" ")[2];
+ console.log('grabbing ' + chart_id);
+ var png_data = page.evaluate(function (chart_id) {
+ var chart_div = document.getElementById(chart_id + '_png');
+ return chart_div.outerHTML;
+ }, chart_id);
+ fs.write(args[2] + '/' + chart_id + '.png', png_data, 'w');
+ }
+};
+
+// Check command line arguments
+var args = system.args;
+if (args.length != 3) {
+ console.log("USAGE: " + args[0] + " REPORT_HTML OUT_DIR\n");
+ phantom.exit(1);
+}
+
+// Open the web page
+page.open(args[1], function(status) {
+ if (status == 'fail') {
+ console.log("Failed to open file '" + args[1] + "'");
+ phantom.exit(1);
+ }
+});
+
+// Check status every 100 ms
+interval = window.setInterval(function () {
+ //console.log('waiting');
+ if (window.charts_ready) {
+ clearTimeout(timer);
+ clearInterval(interval);
+
+        var fname = args[1].replace(/\/+$/, "").split("/").pop();
+ console.log("saving " + fname);
+ fs.write(args[2] + '/' + fname, page.content, 'w');
+ phantom.exit(0);
+ }
+}, 100);
+
+// Time-out after 10 seconds
+timer = window.setTimeout(function () {
+ clearInterval(interval);
+ console.log("ERROR: timeout");
+ phantom.exit(1);
+}, 10000);
diff --git a/poky/scripts/lib/buildstats.py b/poky/scripts/lib/buildstats.py
new file mode 100644
index 000000000..d9aadf3cb
--- /dev/null
+++ b/poky/scripts/lib/buildstats.py
@@ -0,0 +1,349 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Functionality for analyzing buildstats"""
+import json
+import logging
+import os
+import re
+from collections import namedtuple, OrderedDict
+from statistics import mean
+
+
+log = logging.getLogger()
+
+
+taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'value1', 'value2',
+ 'absdiff', 'reldiff')
+TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
+
+
+class BSError(Exception):
+ """Error handling of buildstats"""
+ pass
+
+
+class BSTask(dict):
+ def __init__(self, *args, **kwargs):
+ self['start_time'] = None
+ self['elapsed_time'] = None
+ self['status'] = None
+ self['iostat'] = {}
+ self['rusage'] = {}
+ self['child_rusage'] = {}
+ super(BSTask, self).__init__(*args, **kwargs)
+
+ @property
+ def cputime(self):
+ """Sum of user and system time taken by the task"""
+ rusage = self['rusage']['ru_stime'] + self['rusage']['ru_utime']
+ if self['child_rusage']:
+ # Child rusage may have been optimized out
+ return rusage + self['child_rusage']['ru_stime'] + self['child_rusage']['ru_utime']
+ else:
+ return rusage
+
+ @property
+ def walltime(self):
+ """Elapsed wall clock time"""
+ return self['elapsed_time']
+
+ @property
+ def read_bytes(self):
+ """Bytes read from the block layer"""
+ return self['iostat']['read_bytes']
+
+ @property
+ def write_bytes(self):
+ """Bytes written to the block layer"""
+ return self['iostat']['write_bytes']
+
+ @property
+ def read_ops(self):
+ """Number of read operations on the block layer"""
+ if self['child_rusage']:
+ # Child rusage may have been optimized out
+ return self['rusage']['ru_inblock'] + self['child_rusage']['ru_inblock']
+ else:
+ return self['rusage']['ru_inblock']
+
+ @property
+ def write_ops(self):
+ """Number of write operations on the block layer"""
+ if self['child_rusage']:
+ # Child rusage may have been optimized out
+ return self['rusage']['ru_oublock'] + self['child_rusage']['ru_oublock']
+ else:
+ return self['rusage']['ru_oublock']
+
+ @classmethod
+ def from_file(cls, buildstat_file):
+ """Read buildstat text file"""
+ bs_task = cls()
+ log.debug("Reading task buildstats from %s", buildstat_file)
+ start_time = None
+ end_time = None
+ with open(buildstat_file) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Started':
+ start_time = float(val)
+ bs_task['start_time'] = start_time
+ elif key == 'Ended':
+ end_time = float(val)
+ elif key.startswith('IO '):
+ split = key.split()
+ bs_task['iostat'][split[1]] = int(val)
+ elif key.find('rusage') >= 0:
+ split = key.split()
+ ru_key = split[-1]
+ if ru_key in ('ru_stime', 'ru_utime'):
+ val = float(val)
+ else:
+ val = int(val)
+ ru_type = 'rusage' if split[0] == 'rusage' else \
+ 'child_rusage'
+ bs_task[ru_type][ru_key] = val
+ elif key == 'Status':
+ bs_task['status'] = val
+ if end_time is not None and start_time is not None:
+ bs_task['elapsed_time'] = end_time - start_time
+ else:
+ raise BSError("{} looks like an invalid buildstats file".format(buildstat_file))
+ return bs_task
+
+
+class BSTaskAggregate(object):
+ """Class representing multiple runs of the same task"""
+ properties = ('cputime', 'walltime', 'read_bytes', 'write_bytes',
+ 'read_ops', 'write_ops')
+
+ def __init__(self, tasks=None):
+ self._tasks = tasks or []
+ self._properties = {}
+
+ def __getattr__(self, name):
+ if name in self.properties:
+ if name not in self._properties:
+ # Calculate properties on demand; only the mean value is
+ # provided so far
+ self._properties[name] = mean([getattr(t, name) for t in self._tasks])
+ return self._properties[name]
+ else:
+ raise AttributeError("'BSTaskAggregate' has no attribute '{}'".format(name))
+
+ def append(self, task):
+ """Append new task"""
+ assert isinstance(task, BSTask), "Type is '{}' instead of 'BSTask'".format(type(task))
+ # Reset pre-calculated properties
+ self._properties = {}
+ self._tasks.append(task)
+
+
+class BSRecipe(object):
+ """Class representing buildstats of one recipe"""
+ def __init__(self, name, epoch, version, revision):
+ self.name = name
+ self.epoch = epoch
+ self.version = version
+ self.revision = revision
+ if epoch is None:
+ self.evr = "{}-{}".format(version, revision)
+ else:
+ self.evr = "{}_{}-{}".format(epoch, version, revision)
+ self.tasks = {}
+
+ def aggregate(self, bsrecipe):
+ """Aggregate data of another recipe buildstats"""
+ if self.nevr != bsrecipe.nevr:
+ raise ValueError("Refusing to aggregate buildstats, recipe version "
+ "differs: {} vs. {}".format(self.nevr, bsrecipe.nevr))
+ if set(self.tasks.keys()) != set(bsrecipe.tasks.keys()):
+ raise ValueError("Refusing to aggregate buildstats, set of tasks "
+ "in {} differ".format(self.name))
+
+ for taskname, taskdata in bsrecipe.tasks.items():
+ if not isinstance(self.tasks[taskname], BSTaskAggregate):
+ self.tasks[taskname] = BSTaskAggregate([self.tasks[taskname]])
+ self.tasks[taskname].append(taskdata)
+
+ @property
+ def nevr(self):
+ return self.name + '-' + self.evr
+
+
+class BuildStats(dict):
+ """Class representing buildstats of one build"""
+
+ @property
+ def num_tasks(self):
+ """Get number of tasks"""
+ num = 0
+ for recipe in self.values():
+ num += len(recipe.tasks)
+ return num
+
+ @classmethod
+ def from_json(cls, bs_json):
+ """Create new BuildStats object from JSON object"""
+ buildstats = cls()
+ for recipe in bs_json:
+ if recipe['name'] in buildstats:
+ raise BSError("Cannot handle multiple versions of the same "
+ "package ({})".format(recipe['name']))
+ bsrecipe = BSRecipe(recipe['name'], recipe['epoch'],
+ recipe['version'], recipe['revision'])
+ for task, data in recipe['tasks'].items():
+ bsrecipe.tasks[task] = BSTask(data)
+
+ buildstats[recipe['name']] = bsrecipe
+
+ return buildstats
+
+ @staticmethod
+ def from_file_json(path):
+ """Load buildstats from a JSON file"""
+ with open(path) as fobj:
+ bs_json = json.load(fobj)
+ return BuildStats.from_json(bs_json)
+
+
+ @staticmethod
+ def split_nevr(nevr):
+ """Split name and version information from recipe "nevr" string"""
+ n_e_v, revision = nevr.rsplit('-', 1)
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
+ n_e_v)
+ if not match:
+ # If we're not able to parse a version starting with a number, just
+ # take the part after the last dash
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
+ n_e_v)
+ name = match.group('name')
+ version = match.group('version')
+ epoch = match.group('epoch')
+ return name, epoch, version, revision
+
+ @classmethod
+ def from_dir(cls, path):
+ """Load buildstats from a buildstats directory"""
+ if not os.path.isfile(os.path.join(path, 'build_stats')):
+ raise BSError("{} does not look like a buildstats directory".format(path))
+
+ log.debug("Reading buildstats directory %s", path)
+
+ buildstats = cls()
+ subdirs = os.listdir(path)
+ for dirname in subdirs:
+ recipe_dir = os.path.join(path, dirname)
+ if not os.path.isdir(recipe_dir):
+ continue
+ name, epoch, version, revision = cls.split_nevr(dirname)
+ bsrecipe = BSRecipe(name, epoch, version, revision)
+ for task in os.listdir(recipe_dir):
+ bsrecipe.tasks[task] = BSTask.from_file(
+ os.path.join(recipe_dir, task))
+ if name in buildstats:
+ raise BSError("Cannot handle multiple versions of the same "
+ "package ({})".format(name))
+ buildstats[name] = bsrecipe
+
+ return buildstats
+
+ def aggregate(self, buildstats):
+ """Aggregate other buildstats into this"""
+ if set(self.keys()) != set(buildstats.keys()):
+ raise ValueError("Refusing to aggregate buildstats, set of "
+ "recipes is different")
+ for pkg, data in buildstats.items():
+ self[pkg].aggregate(data)
+
+
+def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None):
+ """Compare the tasks of two buildstats"""
+ tasks_diff = []
+ pkgs = set(bs1.keys()).union(set(bs2.keys()))
+ for pkg in pkgs:
+ tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
+ tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
+ if not tasks1:
+ pkg_op = '+'
+ elif not tasks2:
+ pkg_op = '-'
+ else:
+ pkg_op = ' '
+
+ for task in set(tasks1.keys()).union(set(tasks2.keys())):
+ task_op = ' '
+ if task in tasks1:
+ val1 = getattr(bs1[pkg].tasks[task], stat_attr)
+ else:
+ task_op = '+'
+ val1 = 0
+ if task in tasks2:
+ val2 = getattr(bs2[pkg].tasks[task], stat_attr)
+ else:
+ val2 = 0
+ task_op = '-'
+
+ if val1 == 0:
+ reldiff = float('inf')
+ else:
+ reldiff = 100 * (val2 - val1) / val1
+
+ if min_val and max(val1, val2) < min_val:
+ log.debug("Filtering out %s:%s (%s)", pkg, task,
+ max(val1, val2))
+ continue
+ if min_absdiff and abs(val2 - val1) < min_absdiff:
+ log.debug("Filtering out %s:%s (difference of %s)", pkg, task,
+ val2-val1)
+ continue
+ tasks_diff.append(TaskDiff(pkg, pkg_op, task, task_op, val1, val2,
+ val2-val1, reldiff))
+ return tasks_diff
+
+
+class BSVerDiff(object):
+ """Class representing recipe version differences between two buildstats"""
+ def __init__(self, bs1, bs2):
+ RecipeVerDiff = namedtuple('RecipeVerDiff', 'left right')
+
+ recipes1 = set(bs1.keys())
+ recipes2 = set(bs2.keys())
+
+ self.new = dict([(r, bs2[r]) for r in sorted(recipes2 - recipes1)])
+ self.dropped = dict([(r, bs1[r]) for r in sorted(recipes1 - recipes2)])
+ self.echanged = {}
+ self.vchanged = {}
+ self.rchanged = {}
+ self.unchanged = {}
+ self.empty_diff = False
+
+ common = recipes2.intersection(recipes1)
+ if common:
+ for recipe in common:
+ rdiff = RecipeVerDiff(bs1[recipe], bs2[recipe])
+ if bs1[recipe].epoch != bs2[recipe].epoch:
+ self.echanged[recipe] = rdiff
+ elif bs1[recipe].version != bs2[recipe].version:
+ self.vchanged[recipe] = rdiff
+ elif bs1[recipe].revision != bs2[recipe].revision:
+ self.rchanged[recipe] = rdiff
+ else:
+ self.unchanged[recipe] = rdiff
+
+ if len(recipes1) == len(recipes2) == len(self.unchanged):
+ self.empty_diff = True
+
+ def __bool__(self):
+ return not self.empty_diff
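
A minimal usage sketch for the module above, assuming scripts/lib is on sys.path and two buildstats directories produced by earlier builds exist (directory names are illustrative):

    from buildstats import BuildStats, diff_buildstats

    # Load buildstats from two builds of the same set of recipes
    bs1 = BuildStats.from_dir('buildstats/build-A')
    bs2 = BuildStats.from_dir('buildstats/build-B')

    # Report tasks whose cputime changed by at least 5 seconds
    for d in diff_buildstats(bs1, bs2, 'cputime', min_absdiff=5):
        print('%s%s:%s %.1f -> %.1f (%+.1f%%)' %
              (d.pkg_op, d.pkg, d.task, d.value1, d.value2, d.reldiff))
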
diff --git a/poky/scripts/lib/checklayer/__init__.py b/poky/scripts/lib/checklayer/__init__.py
new file mode 100644
index 000000000..2618416fa
--- /dev/null
+++ b/poky/scripts/lib/checklayer/__init__.py
@@ -0,0 +1,394 @@
+# Yocto Project layer check tool
+#
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import re
+import subprocess
+from enum import Enum
+
+import bb.tinfoil
+
+class LayerType(Enum):
+ BSP = 0
+ DISTRO = 1
+ SOFTWARE = 2
+ ERROR_NO_LAYER_CONF = 98
+ ERROR_BSP_DISTRO = 99
+
+def _get_configurations(path):
+ configs = []
+
+ for f in os.listdir(path):
+ file_path = os.path.join(path, f)
+ if os.path.isfile(file_path) and f.endswith('.conf'):
+ configs.append(f[:-5]) # strip .conf
+ return configs
+
+def _get_layer_collections(layer_path, lconf=None, data=None):
+ import bb.parse
+ import bb.data
+
+ if lconf is None:
+ lconf = os.path.join(layer_path, 'conf', 'layer.conf')
+
+ if data is None:
+ ldata = bb.data.init()
+ bb.parse.init_parser(ldata)
+ else:
+ ldata = data.createCopy()
+
+ ldata.setVar('LAYERDIR', layer_path)
+ try:
+ ldata = bb.parse.handle(lconf, ldata, include=True)
+ except:
+ raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path)
+ ldata.expandVarref('LAYERDIR')
+
+ collections = (ldata.getVar('BBFILE_COLLECTIONS') or '').split()
+ if not collections:
+ name = os.path.basename(layer_path)
+ collections = [name]
+
+ collections = {c: {} for c in collections}
+ for name in collections:
+ priority = ldata.getVar('BBFILE_PRIORITY_%s' % name)
+ pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
+ depends = ldata.getVar('LAYERDEPENDS_%s' % name)
+ compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
+ collections[name]['priority'] = priority
+ collections[name]['pattern'] = pattern
+ collections[name]['depends'] = depends
+ collections[name]['compat'] = compat
+
+ return collections
+
+def _detect_layer(layer_path):
+ """
+ Scans a layer directory to detect what type of layer it
+ is: BSP, Distro or Software.
+
+ Returns a dictionary with layer name, type and path.
+ """
+
+ layer = {}
+ layer_name = os.path.basename(layer_path)
+
+ layer['name'] = layer_name
+ layer['path'] = layer_path
+ layer['conf'] = {}
+
+ if not os.path.isfile(os.path.join(layer_path, 'conf', 'layer.conf')):
+ layer['type'] = LayerType.ERROR_NO_LAYER_CONF
+ return layer
+
+ machine_conf = os.path.join(layer_path, 'conf', 'machine')
+ distro_conf = os.path.join(layer_path, 'conf', 'distro')
+
+ is_bsp = False
+ is_distro = False
+
+ if os.path.isdir(machine_conf):
+ machines = _get_configurations(machine_conf)
+ if machines:
+ is_bsp = True
+
+ if os.path.isdir(distro_conf):
+ distros = _get_configurations(distro_conf)
+ if distros:
+ is_distro = True
+
+ if is_bsp and is_distro:
+ layer['type'] = LayerType.ERROR_BSP_DISTRO
+ elif is_bsp:
+ layer['type'] = LayerType.BSP
+ layer['conf']['machines'] = machines
+ elif is_distro:
+ layer['type'] = LayerType.DISTRO
+ layer['conf']['distros'] = distros
+ else:
+ layer['type'] = LayerType.SOFTWARE
+
+ layer['collections'] = _get_layer_collections(layer['path'])
+
+ return layer
+
+def detect_layers(layer_directories, no_auto):
+ layers = []
+
+ for directory in layer_directories:
+ directory = os.path.realpath(directory)
+ if directory[-1] == '/':
+ directory = directory[0:-1]
+
+ if no_auto:
+ conf_dir = os.path.join(directory, 'conf')
+ if os.path.isdir(conf_dir):
+ layer = _detect_layer(directory)
+ if layer:
+ layers.append(layer)
+ else:
+ for root, dirs, files in os.walk(directory):
+ dir_name = os.path.basename(root)
+ conf_dir = os.path.join(root, 'conf')
+ if os.path.isdir(conf_dir):
+ layer = _detect_layer(root)
+ if layer:
+ layers.append(layer)
+
+ return layers
+
+def _find_layer_depends(depend, layers):
+ for layer in layers:
+ for collection in layer['collections']:
+ if depend == collection:
+ return layer
+ return None
+
+def add_layer_dependencies(bblayersconf, layer, layers, logger):
+ def recurse_dependencies(depends, layer, layers, logger, ret=None):
+ if ret is None:
+ ret = []
+ logger.debug('Processing dependencies %s for layer %s.' % \
+ (depends, layer['name']))
+
+ for depend in depends.split():
+ # core (oe-core) is supposed to be provided
+ if depend == 'core':
+ continue
+
+ layer_depend = _find_layer_depends(depend, layers)
+ if not layer_depend:
+ logger.error('Layer %s depends on %s, which isn\'t found.' % \
+ (layer['name'], depend))
+ ret = None
+ continue
+
+ # We keep processing even if ret is None; this allows us to
+ # report multiple errors at once
+ if ret is not None and layer_depend not in ret:
+ ret.append(layer_depend)
+
+ # Recursively process...
+ if 'collections' not in layer_depend:
+ continue
+
+ for collection in layer_depend['collections']:
+ collect_deps = layer_depend['collections'][collection]['depends']
+ if not collect_deps:
+ continue
+ ret = recurse_dependencies(collect_deps, layer_depend, layers, logger, ret)
+
+ return ret
+
+ layer_depends = []
+ for collection in layer['collections']:
+ depends = layer['collections'][collection]['depends']
+ if not depends:
+ continue
+
+ layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)
+
+ # Note: [] (empty) is allowed, None is not!
+ if layer_depends is None:
+ return False
+ else:
+ # Don't add a layer that is already present.
+ added = set()
+ output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
+ for lname, path, pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
+ added.add(path)
+
+ for layer_depend in layer_depends:
+ name = layer_depend['name']
+ path = layer_depend['path']
+ if path in added:
+ continue
+ else:
+ added.add(path)
+ logger.info('Adding layer dependency %s' % name)
+ with open(bblayersconf, 'a+') as f:
+ f.write("\nBBLAYERS += \"%s\"\n" % path)
+ return True
+
+def add_layer(bblayersconf, layer, layers, logger):
+ logger.info('Adding layer %s' % layer['name'])
+ with open(bblayersconf, 'a+') as f:
+ f.write("\nBBLAYERS += \"%s\"\n" % layer['path'])
+
+ return True
+
+def check_command(error_msg, cmd):
+ '''
+ Run a command under a shell, capture stdout and stderr in a single stream,
+ and raise an error when the command returns a non-zero exit code. Returns the output.
+ '''
+
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output, _ = p.communicate()
+ if p.returncode:
+ msg = "%s\nCommand: %s\nOutput:\n%s" % (error_msg, cmd, output.decode('utf-8'))
+ raise RuntimeError(msg)
+ return output
+
+def get_signatures(builddir, failsafe=False, machine=None):
+ import re
+
+ # Some recipes, like meta-world-pkgdata, need to be excluded
+ # because a layer can add recipes to a world build, which would
+ # change their signatures
+ exclude_recipes = ('meta-world-pkgdata',)
+
+ sigs = {}
+ tune2tasks = {}
+
+ cmd = ''
+ if machine:
+ cmd += 'MACHINE=%s ' % machine
+ cmd += 'bitbake '
+ if failsafe:
+ cmd += '-k '
+ cmd += '-S none world'
+ sigs_file = os.path.join(builddir, 'locked-sigs.inc')
+ if os.path.exists(sigs_file):
+ os.unlink(sigs_file)
+ try:
+ check_command('Generating signatures failed. This might be due to some parse error and/or general layer incompatibilities.',
+ cmd)
+ except RuntimeError as ex:
+ if failsafe and os.path.exists(sigs_file):
+ # Ignore the error here. Most likely some recipes active
+ # in a world build lack some dependencies. There is a
+ # separate test_machine_world_build which exposes the
+ # failure.
+ pass
+ else:
+ raise
+
+ sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$")
+ tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
+ current_tune = None
+ with open(sigs_file, 'r') as f:
+ for line in f.readlines():
+ line = line.strip()
+ t = tune_regex.search(line)
+ if t:
+ current_tune = t.group('tune')
+ s = sig_regex.match(line)
+ if s:
+ exclude = False
+ for er in exclude_recipes:
+ (recipe, task) = s.group('task').split(':')
+ if er == recipe:
+ exclude = True
+ break
+ if exclude:
+ continue
+
+ sigs[s.group('task')] = s.group('hash')
+ tune2tasks.setdefault(current_tune, []).append(s.group('task'))
+
+ if not sigs:
+ raise RuntimeError('Can\'t load signatures from %s' % sigs_file)
+
+ return (sigs, tune2tasks)
+
+def get_depgraph(targets=['world'], failsafe=False):
+ '''
+ Returns the dependency graph for the given target(s).
+ The dependency graph is taken directly from DepTreeEvent.
+ '''
+ depgraph = None
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False)
+ tinfoil.set_event_mask(['bb.event.NoProvider', 'bb.event.DepTreeGenerated', 'bb.command.CommandCompleted'])
+ if not tinfoil.run_command('generateDepTreeEvent', targets, 'do_build'):
+ raise RuntimeError('starting generateDepTreeEvent failed')
+ while True:
+ event = tinfoil.wait_event(timeout=1000)
+ if event:
+ if isinstance(event, bb.command.CommandFailed):
+ raise RuntimeError('Generating dependency information failed: %s' % event.error)
+ elif isinstance(event, bb.command.CommandCompleted):
+ break
+ elif isinstance(event, bb.event.NoProvider):
+ if failsafe:
+ # The event is informational, we will get information about the
+ # remaining dependencies eventually and thus can ignore this
+ # here like we do in get_signatures(), if desired.
+ continue
+ if event._reasons:
+ raise RuntimeError('Nothing provides %s: %s' % (event._item, event._reasons))
+ else:
+ raise RuntimeError('Nothing provides %s.' % (event._item))
+ elif isinstance(event, bb.event.DepTreeGenerated):
+ depgraph = event._depgraph
+
+ if depgraph is None:
+ raise RuntimeError('Could not retrieve the depgraph.')
+ return depgraph
+
+def compare_signatures(old_sigs, curr_sigs):
+ '''
+ Compares the result of two get_signatures() calls. Returns None if no
+ problems found, otherwise a string that can be used as additional
+ explanation in self.fail().
+ '''
+ # task -> (old signature, new signature)
+ sig_diff = {}
+ for task in old_sigs:
+ if task in curr_sigs and \
+ old_sigs[task] != curr_sigs[task]:
+ sig_diff[task] = (old_sigs[task], curr_sigs[task])
+
+ if not sig_diff:
+ return None
+
+ # Beware, depgraph uses task=<pn>.<taskname> whereas get_signatures()
+ # uses <pn>:<taskname>. Need to convert sometimes. The output follows
+ # the convention from get_signatures() because that seems closer to
+ # normal bitbake output.
+ def sig2graph(task):
+ pn, taskname = task.rsplit(':', 1)
+ return pn + '.' + taskname
+ def graph2sig(task):
+ pn, taskname = task.rsplit('.', 1)
+ return pn + ':' + taskname
+ depgraph = get_depgraph(failsafe=True)
+ depends = depgraph['tdepends']
+
+ # If a task A has a changed signature, but none of its
+ # dependencies, then we need to report it because it is
+ # the one which introduces a change. Any task depending on
+ # A (directly or indirectly) will also have a changed
+ # signature, but we don't need to report it. It might have
+ # its own changes, which will become apparent once the
+ # issues that we do report are fixed and the test gets run
+ # again.
+ sig_diff_filtered = []
+ for task, (old_sig, new_sig) in sig_diff.items():
+ deps_tainted = False
+ for dep in depends.get(sig2graph(task), ()):
+ if graph2sig(dep) in sig_diff:
+ deps_tainted = True
+ break
+ if not deps_tainted:
+ sig_diff_filtered.append((task, old_sig, new_sig))
+
+ msg = []
+ msg.append('%d signatures changed, initial differences (first hash before, second after):' %
+ len(sig_diff))
+ for diff in sorted(sig_diff_filtered):
+ recipe, taskname = diff[0].rsplit(':', 1)
+ cmd = 'bitbake-diffsigs --task %s %s --signature %s %s' % \
+ (recipe, taskname, diff[1], diff[2])
+ msg.append(' %s: %s -> %s' % diff)
+ msg.append(' %s' % cmd)
+ try:
+ output = check_command('Determining signature difference failed.',
+ cmd).decode('utf-8')
+ except RuntimeError as error:
+ output = str(error)
+ if output:
+ msg.extend([' ' + line for line in output.splitlines()])
+ msg.append('')
+ return '\n'.join(msg)
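
A minimal sketch of using the detection helpers above outside the test suite, assuming bitbake's lib directory and scripts/lib are on sys.path (the layer path is illustrative):

    from checklayer import detect_layers, LayerType

    # Auto-detect layers below the given directory and print their type
    layers = detect_layers(['/path/to/meta-example'], no_auto=False)
    for layer in layers:
        if layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
            print('%s: no conf/layer.conf found' % layer['name'])
        else:
            print('%s: %s' % (layer['name'], layer['type'].name))
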
diff --git a/poky/scripts/lib/checklayer/case.py b/poky/scripts/lib/checklayer/case.py
new file mode 100644
index 000000000..9dd00412e
--- /dev/null
+++ b/poky/scripts/lib/checklayer/case.py
@@ -0,0 +1,7 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+
+class OECheckLayerTestCase(OETestCase):
+ pass
diff --git a/poky/scripts/lib/checklayer/cases/__init__.py b/poky/scripts/lib/checklayer/cases/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/scripts/lib/checklayer/cases/__init__.py
diff --git a/poky/scripts/lib/checklayer/cases/bsp.py b/poky/scripts/lib/checklayer/cases/bsp.py
new file mode 100644
index 000000000..b6b611be7
--- /dev/null
+++ b/poky/scripts/lib/checklayer/cases/bsp.py
@@ -0,0 +1,204 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import unittest
+
+from checklayer import LayerType, get_signatures, check_command, get_depgraph
+from checklayer.case import OECheckLayerTestCase
+
+class BSPCheckLayer(OECheckLayerTestCase):
+ @classmethod
+ def setUpClass(self):
+ if self.tc.layer['type'] != LayerType.BSP:
+ raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." %\
+ self.tc.layer['name'])
+
+ def test_bsp_defines_machines(self):
+ self.assertTrue(self.tc.layer['conf']['machines'],
+ "Layer is BSP but doesn't defines machines.")
+
+ def test_bsp_no_set_machine(self):
+ from oeqa.utils.commands import get_bb_var
+
+ machine = get_bb_var('MACHINE')
+ self.assertEqual(self.td['bbvars']['MACHINE'], machine,
+ msg="Layer %s modified machine %s -> %s" % \
+ (self.tc.layer['name'], self.td['bbvars']['MACHINE'], machine))
+
+
+ def test_machine_world(self):
+ '''
+ "bitbake world" is expected to work regardless which machine is selected.
+ BSP layers sometimes break that by enabling a recipe for a certain machine
+ without checking whether that recipe actually can be built in the current
+ distro configuration (for example, OpenGL might not enabled).
+
+ This test iterates over all machines. It would be nicer to instantiate
+ it once per machine. It merely checks for errors during parse
+ time. It does not actually attempt to build anything.
+ '''
+
+ if not self.td['machines']:
+ self.skipTest('No machines set with --machines.')
+ msg = []
+ for machine in self.td['machines']:
+ # In contrast to test_machine_signatures() below, errors are fatal here.
+ try:
+ get_signatures(self.td['builddir'], failsafe=False, machine=machine)
+ except RuntimeError as ex:
+ msg.append(str(ex))
+ if msg:
+ msg.insert(0, 'The following machines broke a world build:')
+ self.fail('\n'.join(msg))
+
+ def test_machine_signatures(self):
+ '''
+ Selecting a machine may only affect the signature of tasks that are specific
+ to that machine. In other words, when MACHINE=A and MACHINE=B share a recipe
+ foo and the output of foo, then both machine configurations must build foo
+ in exactly the same way. Otherwise it is not possible to use both machines
+ in the same distribution.
+
+ This criterion can only be tested by testing different machines in combination,
+ i.e. one main layer, potentially several additional BSP layers and an explicit
+ choice of machines:
+ yocto-check-layer --additional-layers .../meta-intel --machines intel-corei7-64 imx6slevk -- .../meta-freescale
+ '''
+
+ if not self.td['machines']:
+ self.skipTest('No machines set with --machines.')
+
+ # Collect signatures for all machines that we are testing
+ # and merge that into a hash:
+ # tune -> task -> signature -> list of machines with that combination
+ #
+ # It is an error if any tune/task pair has more than one signature,
+ # because that implies that the machines that caused those different
+ # signatures do not agree on how to execute the task.
+ tunes = {}
+ # Preserve ordering of machines as chosen by the user.
+ for machine in self.td['machines']:
+ curr_sigs, tune2tasks = get_signatures(self.td['builddir'], failsafe=True, machine=machine)
+ # Invert the tune -> [tasks] mapping.
+ tasks2tune = {}
+ for tune, tasks in tune2tasks.items():
+ for task in tasks:
+ tasks2tune[task] = tune
+ for task, sighash in curr_sigs.items():
+ tunes.setdefault(tasks2tune[task], {}).setdefault(task, {}).setdefault(sighash, []).append(machine)
+
+ msg = []
+ pruned = 0
+ last_line_key = None
+ # do_fetch, do_unpack, ..., do_build
+ taskname_list = []
+ if tunes:
+ # The output below is most useful when we start with tasks that are at
+ # the bottom of the dependency chain, i.e. those that run first. If
+ # those tasks differ, the rest also does.
+ #
+ # To get an ordering of tasks, we do a topological sort of the entire
+ # depgraph for the base configuration, then on-the-fly flatten that list by stripping
+ # out the recipe names and removing duplicates. The base configuration
+ # is not necessarily representative, but should be close enough. Tasks
+ # that were not encountered get a default priority.
+ depgraph = get_depgraph()
+ depends = depgraph['tdepends']
+ WHITE = 1
+ GRAY = 2
+ BLACK = 3
+ color = {}
+ found = set()
+ def visit(task):
+ color[task] = GRAY
+ for dep in depends.get(task, ()):
+ if color.setdefault(dep, WHITE) == WHITE:
+ visit(dep)
+ color[task] = BLACK
+ pn, taskname = task.rsplit('.', 1)
+ if taskname not in found:
+ taskname_list.append(taskname)
+ found.add(taskname)
+ for task in depends.keys():
+ if color.setdefault(task, WHITE) == WHITE:
+ visit(task)
+
+ taskname_order = dict([(task, index) for index, task in enumerate(taskname_list) ])
+ def task_key(task):
+ pn, taskname = task.rsplit(':', 1)
+ return (pn, taskname_order.get(taskname, len(taskname_list)), taskname)
+
+ for tune in sorted(tunes.keys()):
+ tasks = tunes[tune]
+ # As for test_signatures it would be nicer to sort tasks
+ # by dependencies here, but that is harder because we have
+ # to report on tasks from different machines, which might
+ # have different dependencies. We resort to pruning the
+ # output by reporting only one task per recipe if the set
+ # of machines matches.
+ #
+ # "bitbake-diffsigs -t -s" is intelligent enough to print
+ # diffs recursively, so often it does not matter that much
+ # if we don't pick the underlying difference
+ # here. However, sometimes recursion fails
+ # (https://bugzilla.yoctoproject.org/show_bug.cgi?id=6428).
+ #
+ # To mitigate that a bit, we use a hard-coded ordering of
+ # tasks that represents how they normally run and prefer
+ # to print the ones that run first.
+ for task in sorted(tasks.keys(), key=task_key):
+ signatures = tasks[task]
+ # do_build can be ignored: it is known to have
+ # different signatures in some cases, for example in
+ # the allarch ca-certificates due to RDEPENDS=openssl.
+ # That particular dependency is whitelisted via
+ # SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
+ # in the sstate signature hash because filtering it
+ # out would be hard and running do_build multiple
+ # times doesn't really matter.
+ if len(signatures.keys()) > 1 and \
+ not task.endswith(':do_build'):
+ # Error!
+ #
+ # Sort signatures by machines, because the hex values don't mean anything.
+ # => all-arch adwaita-icon-theme:do_build: 1234... (beaglebone, qemux86) != abcdf... (qemux86-64)
+ #
+ # Skip the line if it is covered already by the predecessor (same pn, same sets of machines).
+ pn, taskname = task.rsplit(':', 1)
+ next_line_key = (pn, sorted(signatures.values()))
+ if next_line_key != last_line_key:
+ line = ' %s %s: ' % (tune, task)
+ line += ' != '.join(['%s (%s)' % (signature, ', '.join([m for m in signatures[signature]])) for
+ signature in sorted(signatures.keys(), key=lambda s: signatures[s])])
+ last_line_key = next_line_key
+ msg.append(line)
+ # Randomly pick two mismatched signatures and remember how to invoke
+ # bitbake-diffsigs for them.
+ iterator = iter(signatures.items())
+ a = next(iterator)
+ b = next(iterator)
+ diffsig_machines = '(%s) != (%s)' % (', '.join(a[1]), ', '.join(b[1]))
+ diffsig_params = '-t %s %s -s %s %s' % (pn, taskname, a[0], b[0])
+ else:
+ pruned += 1
+
+ if msg:
+ msg.insert(0, 'The machines have conflicting signatures for some shared tasks:')
+ if pruned > 0:
+ msg.append('')
+ msg.append('%d tasks were not listed because some other task of the recipe already differed.' % pruned)
+ msg.append('It is likely that differences from different recipes also have the same root cause.')
+ msg.append('')
+ # Explain how to investigate...
+ msg.append('To investigate, run bitbake-diffsigs -t recipename taskname -s fromsig tosig.')
+ cmd = 'bitbake-diffsigs %s' % diffsig_params
+ msg.append('Example: %s in the last line' % diffsig_machines)
+ msg.append('Command: %s' % cmd)
+ # ... and actually do it automatically for that example, but without aborting
+ # when that fails.
+ try:
+ output = check_command('Comparing signatures failed.', cmd).decode('utf-8')
+ except RuntimeError as ex:
+ output = str(ex)
+ msg.extend([' ' + line for line in output.splitlines()])
+ self.fail('\n'.join(msg))
diff --git a/poky/scripts/lib/checklayer/cases/common.py b/poky/scripts/lib/checklayer/cases/common.py
new file mode 100644
index 000000000..1bef61b04
--- /dev/null
+++ b/poky/scripts/lib/checklayer/cases/common.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import glob
+import os
+import unittest
+from checklayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures
+from checklayer.case import OECheckLayerTestCase
+
+class CommonCheckLayer(OECheckLayerTestCase):
+ def test_readme(self):
+ # The top-level README file may have a suffix (like README.rst or README.txt).
+ readme_files = glob.glob(os.path.join(self.tc.layer['path'], 'README*'))
+ self.assertTrue(len(readme_files) > 0,
+ msg="Layer doesn't contains README file.")
+
+ # There might be more than one file matching the file pattern above
+ # (for example, README.rst and README-COPYING.rst). The one with the shortest
+ # name is considered the "main" one.
+ readme_file = sorted(readme_files)[0]
+ data = ''
+ with open(readme_file, 'r') as f:
+ data = f.read()
+ self.assertTrue(data,
+ msg="Layer contains a README file but it is empty.")
+
+ def test_parse(self):
+ check_command('Layer %s failed to parse.' % self.tc.layer['name'],
+ 'bitbake -p')
+
+ def test_show_environment(self):
+ check_command('Layer %s failed to show environment.' % self.tc.layer['name'],
+ 'bitbake -e')
+
+ def test_world(self):
+ '''
+ "bitbake world" is expected to work. test_signatures does not cover that
+ because it is more lenient and ignores recipes in a world build that
+ are not actually buildable, so here we fail when "bitbake -S none world"
+ fails.
+ '''
+ get_signatures(self.td['builddir'], failsafe=False)
+
+ def test_signatures(self):
+ if self.tc.layer['type'] == LayerType.SOFTWARE and \
+ not self.tc.test_software_layer_signatures:
+ raise unittest.SkipTest("Not testing for signature changes in a software layer %s." \
+ % self.tc.layer['name'])
+
+ curr_sigs, _ = get_signatures(self.td['builddir'], failsafe=True)
+ msg = compare_signatures(self.td['sigs'], curr_sigs)
+ if msg is not None:
+ self.fail('Adding layer %s changed signatures.\n%s' % (self.tc.layer['name'], msg))
+
+ def test_layerseries_compat(self):
+ for collection_name, collection_data in self.tc.layer['collections'].items():
+ self.assertTrue(collection_data['compat'], "Collection %s from layer %s does not set compatible oe-core versions via LAYERSERIES_COMPAT_<collection>." \
+ % (collection_name, self.tc.layer['name']))
diff --git a/poky/scripts/lib/checklayer/cases/distro.py b/poky/scripts/lib/checklayer/cases/distro.py
new file mode 100644
index 000000000..df1b3035e
--- /dev/null
+++ b/poky/scripts/lib/checklayer/cases/distro.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import unittest
+
+from checklayer import LayerType
+from checklayer.case import OECheckLayerTestCase
+
+class DistroCheckLayer(OECheckLayerTestCase):
+ @classmethod
+ def setUpClass(self):
+ if self.tc.layer['type'] != LayerType.DISTRO:
+ raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't Distro one." %\
+ self.tc.layer['name'])
+
+ def test_distro_defines_distros(self):
+ self.assertTrue(self.tc.layer['conf']['distros'],
+ "Layer is BSP but doesn't defines machines.")
+
+ def test_distro_no_set_distros(self):
+ from oeqa.utils.commands import get_bb_var
+
+ distro = get_bb_var('DISTRO')
+ self.assertEqual(self.td['bbvars']['DISTRO'], distro,
+ msg="Layer %s modified distro %s -> %s" % \
+ (self.tc.layer['name'], self.td['bbvars']['DISTRO'], distro))
diff --git a/poky/scripts/lib/checklayer/context.py b/poky/scripts/lib/checklayer/context.py
new file mode 100644
index 000000000..1bec2c410
--- /dev/null
+++ b/poky/scripts/lib/checklayer/context.py
@@ -0,0 +1,15 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import glob
+import re
+
+from oeqa.core.context import OETestContext
+
+class CheckLayerTestContext(OETestContext):
+ def __init__(self, td=None, logger=None, layer=None, test_software_layer_signatures=True):
+ super(CheckLayerTestContext, self).__init__(td, logger)
+ self.layer = layer
+ self.test_software_layer_signatures = test_software_layer_signatures
diff --git a/poky/scripts/lib/devtool/__init__.py b/poky/scripts/lib/devtool/__init__.py
new file mode 100644
index 000000000..89f098a91
--- /dev/null
+++ b/poky/scripts/lib/devtool/__init__.py
@@ -0,0 +1,383 @@
+#!/usr/bin/env python3
+
+# Development tool - utility functions for plugins
+#
+# Copyright (C) 2014 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool plugins module"""
+
+import os
+import sys
+import subprocess
+import logging
+import re
+import codecs
+import errno
+
+logger = logging.getLogger('devtool')
+
+class DevtoolError(Exception):
+ """Exception for handling devtool errors"""
+ def __init__(self, message, exitcode=1):
+ super(DevtoolError, self).__init__(message)
+ self.exitcode = exitcode
+
+
+def exec_build_env_command(init_path, builddir, cmd, watch=False, **options):
+ """Run a program in bitbake build context"""
+ import bb
+ if 'cwd' not in options:
+ options['cwd'] = builddir
+ if init_path:
+ # As the OE init script makes use of BASH_SOURCE to determine OEROOT,
+ # and can't determine it when running under dash, we need to set
+ # the executable to bash to correctly set things up
+ if 'executable' not in options:
+ options['executable'] = 'bash'
+ logger.debug('Executing command: "%s" using init path %s' % (cmd, init_path))
+ init_prefix = '. %s %s > /dev/null && ' % (init_path, builddir)
+ else:
+ logger.debug('Executing command "%s"' % cmd)
+ init_prefix = ''
+ if watch:
+ if sys.stdout.isatty():
+ # Fool bitbake into thinking it's outputting to a terminal (because it is, indirectly)
+ cmd = 'script -e -q -c "%s" /dev/null' % cmd
+ return exec_watch('%s%s' % (init_prefix, cmd), **options)
+ else:
+ return bb.process.run('%s%s' % (init_prefix, cmd), **options)
+
+def exec_watch(cmd, **options):
+ """Run program with stdout shown on sys.stdout"""
+ import bb
+ if isinstance(cmd, str) and not "shell" in options:
+ options["shell"] = True
+
+ process = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **options
+ )
+
+ reader = codecs.getreader('utf-8')(process.stdout)
+ buf = ''
+ while True:
+ out = reader.read(1, 1)
+ if out:
+ sys.stdout.write(out)
+ sys.stdout.flush()
+ buf += out
+ elif out == '' and process.poll() is not None:
+ break
+
+ if process.returncode != 0:
+ raise bb.process.ExecutionError(cmd, process.returncode, buf, None)
+
+ return buf, None
+
+def exec_fakeroot(d, cmd, **kwargs):
+ """Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
+ # Grab the command and check it actually exists
+ fakerootcmd = d.getVar('FAKEROOTCMD')
+ if not os.path.exists(fakerootcmd):
+ logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install it, and if you have run any build then it should already have been built' % fakerootcmd)
+ return 2
+ # Set up the appropriate environment
+ newenv = dict(os.environ)
+ fakerootenv = d.getVar('FAKEROOTENV')
+ for varvalue in fakerootenv.split():
+ if '=' in varvalue:
+ splitval = varvalue.split('=', 1)
+ newenv[splitval[0]] = splitval[1]
+ return subprocess.call("%s %s" % (fakerootcmd, cmd), env=newenv, **kwargs)
+
+def setup_tinfoil(config_only=False, basepath=None, tracking=False):
+ """Initialize tinfoil api from bitbake"""
+ import scriptpath
+ orig_cwd = os.path.abspath(os.curdir)
+ try:
+ if basepath:
+ os.chdir(basepath)
+ bitbakepath = scriptpath.add_bitbake_lib_path()
+ if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+
+ import bb.tinfoil
+ tinfoil = bb.tinfoil.Tinfoil(tracking=tracking)
+ try:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only)
+ except bb.tinfoil.TinfoilUIException:
+ tinfoil.shutdown()
+ raise DevtoolError('Failed to start bitbake environment')
+ except:
+ tinfoil.shutdown()
+ raise
+ finally:
+ os.chdir(orig_cwd)
+ return tinfoil
+
+def parse_recipe(config, tinfoil, pn, appends, filter_workspace=True):
+ """Parse the specified recipe"""
+ try:
+ recipefile = tinfoil.get_recipe_file(pn)
+ except bb.providers.NoProvider as e:
+ logger.error(str(e))
+ return None
+ if appends:
+ append_files = tinfoil.get_file_appends(recipefile)
+ if filter_workspace:
+ # Filter out appends from the workspace
+ append_files = [path for path in append_files if
+ not path.startswith(config.workspace_path)]
+ else:
+ append_files = None
+ try:
+ rd = tinfoil.parse_recipe_file(recipefile, appends, append_files)
+ except Exception as e:
+ logger.error(str(e))
+ return None
+ return rd
+
+def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
+ """
+ Check that a recipe is in the workspace and (optionally) that source
+ is present.
+ """
+
+ workspacepn = pn
+
+ for recipe, value in workspace.items():
+ if recipe == pn:
+ break
+ if bbclassextend:
+ recipefile = value['recipefile']
+ if recipefile:
+ targets = get_bbclassextend_targets(recipefile, recipe)
+ if pn in targets:
+ workspacepn = recipe
+ break
+ else:
+ raise DevtoolError("No recipe named '%s' in your workspace" % pn)
+
+ if checksrc:
+ srctree = workspace[workspacepn]['srctree']
+ if not os.path.exists(srctree):
+ raise DevtoolError("Source tree %s for recipe %s does not exist" % (srctree, workspacepn))
+ if not os.listdir(srctree):
+ raise DevtoolError("Source tree %s for recipe %s is empty" % (srctree, workspacepn))
+
+ return workspacepn
+
+def use_external_build(same_dir, no_same_dir, d):
+ """
+ Determine if we should use B!=S (separate build and source directories) or not
+ """
+ b_is_s = True
+ if no_same_dir:
+ logger.info('Using separate build directory since --no-same-dir specified')
+ b_is_s = False
+ elif same_dir:
+ logger.info('Using source tree as build directory since --same-dir specified')
+ elif bb.data.inherits_class('autotools-brokensep', d):
+ logger.info('Using source tree as build directory since recipe inherits autotools-brokensep')
+ elif os.path.abspath(d.getVar('B')) == os.path.abspath(d.getVar('S')):
+ logger.info('Using source tree as build directory since that would be the default for this recipe')
+ else:
+ b_is_s = False
+ return b_is_s
+
+def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
+ """
+ Set up the git repository for the source tree
+ """
+ import bb.process
+ import oe.patch
+ if not os.path.exists(os.path.join(repodir, '.git')):
+ bb.process.run('git init', cwd=repodir)
+ bb.process.run('git add .', cwd=repodir)
+ commit_cmd = ['git']
+ oe.patch.GitApplyTree.gitCommandUserOptions(commit_cmd, d=d)
+ commit_cmd += ['commit', '-q']
+ stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
+ if not stdout:
+ commit_cmd.append('--allow-empty')
+ commitmsg = "Initial empty commit with no upstream sources"
+ elif version:
+ commitmsg = "Initial commit from upstream at version %s" % version
+ else:
+ commitmsg = "Initial commit from upstream"
+ commit_cmd += ['-m', commitmsg]
+ bb.process.run(commit_cmd, cwd=repodir)
+
+ # Ensure singletask.lock (as used by externalsrc.bbclass) is ignored by git
+ excludes = []
+ excludefile = os.path.join(repodir, '.git', 'info', 'exclude')
+ try:
+ with open(excludefile, 'r') as f:
+ excludes = f.readlines()
+ except FileNotFoundError:
+ pass
+ if 'singletask.lock\n' not in excludes:
+ excludes.append('singletask.lock\n')
+ with open(excludefile, 'w') as f:
+ for line in excludes:
+ f.write(line)
+
+ bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
+ bb.process.run('git tag -f %s' % basetag, cwd=repodir)
+
+def recipe_to_append(recipefile, config, wildcard=False):
+ """
+ Convert a recipe file to a bbappend file path within the workspace.
+ NOTE: if the bbappend already exists, you should be using
+ workspace[args.recipename]['bbappend'] instead of calling this
+ function.
+ """
+ appendname = os.path.splitext(os.path.basename(recipefile))[0]
+ if wildcard:
+ appendname = re.sub(r'_.*', '_%', appendname)
+ appendpath = os.path.join(config.workspace_path, 'appends')
+ appendfile = os.path.join(appendpath, appendname + '.bbappend')
+ return appendfile
+
+def get_bbclassextend_targets(recipefile, pn):
+ """
+ Cheap function to get BBCLASSEXTEND and then convert that to the
+ list of targets that would result.
+ """
+ import bb.utils
+
+ values = {}
+ def get_bbclassextend_varfunc(varname, origvalue, op, newlines):
+ values[varname] = origvalue
+ return origvalue, None, 0, True
+ with open(recipefile, 'r') as f:
+ bb.utils.edit_metadata(f, ['BBCLASSEXTEND'], get_bbclassextend_varfunc)
+
+ targets = []
+ bbclassextend = values.get('BBCLASSEXTEND', '').split()
+ if bbclassextend:
+ for variant in bbclassextend:
+ if variant == 'nativesdk':
+ targets.append('%s-%s' % (variant, pn))
+ elif variant in ['native', 'cross', 'crosssdk']:
+ targets.append('%s-%s' % (pn, variant))
+ return targets
+
+def replace_from_file(path, old, new):
+ """Replace strings on a file"""
+
+ def read_file(path):
+ data = None
+ with open(path) as f:
+ data = f.read()
+ return data
+
+ def write_file(path, data):
+ if data is None:
+ return
+ wdata = data.rstrip() + "\n"
+ with open(path, "w") as f:
+ f.write(wdata)
+
+ # In case old is None, return immediately
+ if old is None:
+ return
+ try:
+ rdata = read_file(path)
+ except IOError as e:
+ # if the file does not exist, just quit, otherwise raise an exception
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ raise
+
+ old_contents = rdata.splitlines()
+ new_contents = []
+ for old_content in old_contents:
+ try:
+ new_contents.append(old_content.replace(old, new))
+ except ValueError:
+ pass
+ write_file(path, "\n".join(new_contents))
+
+
+def update_unlockedsigs(basepath, workspace, fixed_setup, extra=None):
+ """ This function will make unlocked-sigs.inc match the recipes in the
+ workspace plus any extras we want unlocked. """
+ import bb.utils
+
+ if not fixed_setup:
+ # Only need to write this out within the eSDK
+ return
+
+ if not extra:
+ extra = []
+
+ confdir = os.path.join(basepath, 'conf')
+ unlockedsigs = os.path.join(confdir, 'unlocked-sigs.inc')
+
+ # Get current unlocked list if any
+ values = {}
+ def get_unlockedsigs_varfunc(varname, origvalue, op, newlines):
+ values[varname] = origvalue
+ return origvalue, None, 0, True
+ if os.path.exists(unlockedsigs):
+ with open(unlockedsigs, 'r') as f:
+ bb.utils.edit_metadata(f, ['SIGGEN_UNLOCKED_RECIPES'], get_unlockedsigs_varfunc)
+ unlocked = sorted(values.get('SIGGEN_UNLOCKED_RECIPES', []))
+
+ # If the new list is different to the current list, write it out
+ newunlocked = sorted(list(workspace.keys()) + extra)
+ if unlocked != newunlocked:
+ bb.utils.mkdirhier(confdir)
+ with open(unlockedsigs, 'w') as f:
+ f.write("# DO NOT MODIFY! YOUR CHANGES WILL BE LOST.\n" +
+ "# This layer was created by the OpenEmbedded devtool" +
+ " utility in order to\n" +
+ "# contain recipes that are unlocked.\n")
+
+ f.write('SIGGEN_UNLOCKED_RECIPES += "\\\n')
+ for pn in newunlocked:
+ f.write(' ' + pn)
+ f.write('"')
+
+def check_prerelease_version(ver, operation):
+ if 'pre' in ver or 'rc' in ver:
+ logger.warning('Version "%s" looks like a pre-release version. '
+ 'If that is the case, in order to ensure that the '
+ 'version doesn\'t appear to go backwards when you '
+ 'later upgrade to the final release version, it is '
+ 'recommended that instead you use '
+ '<current version>+<pre-release version> e.g. if '
+ 'upgrading from 1.9 to 2.0-rc2 use "1.9+2.0-rc2". '
+ 'If you prefer not to reset and re-try, you can change '
+ 'the version after %s succeeds using "devtool rename" '
+ 'with -V/--version.' % (ver, operation))
+
+def check_git_repo_dirty(repodir):
+ """Check if a git repository is clean or not"""
+ stdout, _ = bb.process.run('git status --porcelain', cwd=repodir)
+ return stdout
+
+def check_git_repo_op(srctree, ignoredirs=None):
+ """Check if a git repository is in the middle of a rebase"""
+ stdout, _ = bb.process.run('git rev-parse --show-toplevel', cwd=srctree)
+ topleveldir = stdout.strip()
+ if ignoredirs and topleveldir in ignoredirs:
+ return
+ gitdir = os.path.join(topleveldir, '.git')
+ if os.path.exists(os.path.join(gitdir, 'rebase-merge')):
+ raise DevtoolError("Source tree %s appears to be in the middle of a rebase - please resolve this first" % srctree)
+ if os.path.exists(os.path.join(gitdir, 'rebase-apply')):
+ raise DevtoolError("Source tree %s appears to be in the middle of 'git am' or 'git apply' - please resolve this first" % srctree)
diff --git a/poky/scripts/lib/devtool/build.py b/poky/scripts/lib/devtool/build.py
new file mode 100644
index 000000000..252379e9b
--- /dev/null
+++ b/poky/scripts/lib/devtool/build.py
@@ -0,0 +1,86 @@
+# Development tool - build command plugin
+#
+# Copyright (C) 2014-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool build plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import tempfile
+from devtool import exec_build_env_command, check_workspace_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+
+def _set_file_values(fn, values):
+ remaining = list(values.keys())
+
+ def varfunc(varname, origvalue, op, newlines):
+ newvalue = values.get(varname, origvalue)
+ remaining.remove(varname)
+ return (newvalue, '=', 0, True)
+
+ with open(fn, 'r') as f:
+ (updated, newlines) = bb.utils.edit_metadata(f, values, varfunc)
+
+ for item in remaining:
+ updated = True
+ newlines.append('%s = "%s"\n' % (item, values[item]))
+
+ if updated:
+ with open(fn, 'w') as f:
+ f.writelines(newlines)
+ return updated
+
+def _get_build_tasks(config):
+ tasks = config.get('Build', 'build_task', 'populate_sysroot,packagedata').split(',')
+ return ['do_%s' % task.strip() for task in tasks]
+
+def build(args, config, basepath, workspace):
+ """Entry point for the devtool 'build' subcommand"""
+ workspacepn = check_workspace_recipe(workspace, args.recipename, bbclassextend=True)
+
+ build_tasks = _get_build_tasks(config)
+
+ bbappend = workspace[workspacepn]['bbappend']
+ if args.disable_parallel_make:
+ logger.info("Disabling 'make' parallelism")
+ _set_file_values(bbappend, {'PARALLEL_MAKE': ''})
+ try:
+ bbargs = []
+ for task in build_tasks:
+ if args.recipename.endswith('-native') and 'package' in task:
+ continue
+ bbargs.append('%s:%s' % (args.recipename, task))
+ exec_build_env_command(config.init_path, basepath, 'bitbake %s' % ' '.join(bbargs), watch=True)
+ except bb.process.ExecutionError as e:
+ # We've already seen the output since watch=True, so just ensure we return something to the user
+ return e.exitcode
+ finally:
+ if args.disable_parallel_make:
+ _set_file_values(bbappend, {'PARALLEL_MAKE': None})
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ parser_build = subparsers.add_parser('build', help='Build a recipe',
+ description='Builds the specified recipe using bitbake (up to and including %s)' % ', '.join(_get_build_tasks(context.config)),
+ group='working', order=50)
+ parser_build.add_argument('recipename', help='Recipe to build')
+ parser_build.add_argument('-s', '--disable-parallel-make', action="store_true", help='Disable make parallelism')
+ parser_build.set_defaults(func=build)
diff --git a/poky/scripts/lib/devtool/build_image.py b/poky/scripts/lib/devtool/build_image.py
new file mode 100644
index 000000000..e5810389b
--- /dev/null
+++ b/poky/scripts/lib/devtool/build_image.py
@@ -0,0 +1,174 @@
+# Development tool - build-image plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool plugin containing the build-image subcommand."""
+
+import os
+import errno
+import logging
+
+import bb.data
+import bb.utils
+from bb.process import ExecutionError
+from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+class TargetNotImageError(Exception):
+ pass
+
+def _get_packages(tinfoil, workspace, config):
+ """Get list of packages from recipes in the workspace."""
+ result = []
+ for recipe in workspace:
+ data = parse_recipe(config, tinfoil, recipe, True)
+ if 'class-target' in data.getVar('OVERRIDES').split(':'):
+ if recipe in data.getVar('PACKAGES').split():
+ result.append(recipe)
+ else:
+ logger.warning("Skipping recipe %s as it doesn't produce a "
+ "package with the same name", recipe)
+ return result
+
+def build_image(args, config, basepath, workspace):
+ """Entry point for the devtool 'build-image' subcommand."""
+
+ image = args.imagename
+ auto_image = False
+ if not image:
+ sdk_targets = config.get('SDK', 'sdk_targets', '').split()
+ if sdk_targets:
+ image = sdk_targets[0]
+ auto_image = True
+ if not image:
+ raise DevtoolError('Unable to determine image to build, please specify one')
+
+ try:
+ if args.add_packages:
+ add_packages = args.add_packages.split(',')
+ else:
+ add_packages = None
+ result, outputdir = build_image_task(config, basepath, workspace, image, add_packages)
+ except TargetNotImageError:
+ if auto_image:
+ raise DevtoolError('Unable to determine image to build, please specify one')
+ else:
+ raise DevtoolError('Specified recipe %s is not an image recipe' % image)
+
+ if result == 0:
+ logger.info('Successfully built %s. You can find output files in %s'
+ % (image, outputdir))
+ return result
+
+def build_image_task(config, basepath, workspace, image, add_packages=None, task=None, extra_append=None):
+ # remove <image>.bbappend to make sure setup_tinfoil doesn't
+ # break because of it
+ target_basename = config.get('SDK', 'target_basename', '')
+ if target_basename:
+ appendfile = os.path.join(config.workspace_path, 'appends',
+ '%s.bbappend' % target_basename)
+ try:
+ os.unlink(appendfile)
+ except OSError as exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, image, True)
+ if not rd:
+ # Error already shown
+ return (1, None)
+ if not bb.data.inherits_class('image', rd):
+ raise TargetNotImageError()
+
+ # Get the actual filename used and strip the .bb and full path
+ target_basename = rd.getVar('FILE')
+ target_basename = os.path.splitext(os.path.basename(target_basename))[0]
+ config.set('SDK', 'target_basename', target_basename)
+ config.write()
+
+ appendfile = os.path.join(config.workspace_path, 'appends',
+ '%s.bbappend' % target_basename)
+
+ outputdir = None
+ try:
+ if workspace or add_packages:
+ if add_packages:
+ packages = add_packages
+ else:
+ packages = _get_packages(tinfoil, workspace, config)
+ else:
+ packages = None
+ if not task:
+ if not packages and not add_packages and workspace:
+ logger.warning('No recipes in workspace, building image %s unmodified', image)
+ elif not packages:
+ logger.warning('No packages to add, building image %s unmodified', image)
+
+ if packages or extra_append:
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as afile:
+ if packages:
+ # include packages from workspace recipes into the image
+ afile.write('IMAGE_INSTALL_append = " %s"\n' % ' '.join(packages))
+ if not task:
+ logger.info('Building image %s with the following '
+ 'additional packages: %s', image, ' '.join(packages))
+ if extra_append:
+ for line in extra_append:
+ afile.write('%s\n' % line)
+
+ if task in ['populate_sdk', 'populate_sdk_ext']:
+ outputdir = rd.getVar('SDK_DEPLOY')
+ else:
+ outputdir = rd.getVar('DEPLOY_DIR_IMAGE')
+
+ tmp_tinfoil = tinfoil
+ tinfoil = None
+ tmp_tinfoil.shutdown()
+
+ options = ''
+ if task:
+ options += '-c %s' % task
+
+ # run bitbake to build image (or specified task)
+ try:
+ exec_build_env_command(config.init_path, basepath,
+ 'bitbake %s %s' % (options, image), watch=True)
+ except ExecutionError as err:
+ return (err.exitcode, None)
+ finally:
+ if os.path.isfile(appendfile):
+ os.unlink(appendfile)
+ finally:
+ if tinfoil:
+ tinfoil.shutdown()
+ return (0, outputdir)
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the build-image plugin"""
+ parser = subparsers.add_parser('build-image',
+ help='Build image including workspace recipe packages',
+ description='Builds an image, extending it to include '
+ 'packages from recipes in the workspace',
+ group='testbuild', order=-10)
+ parser.add_argument('imagename', help='Image recipe to build', nargs='?')
+ parser.add_argument('-p', '--add-packages', help='Instead of adding packages for the '
+ 'entire workspace, specify packages to be added to the image '
+ '(separate multiple packages by commas)',
+ metavar='PACKAGES')
+ parser.set_defaults(func=build_image)
diff --git a/poky/scripts/lib/devtool/build_sdk.py b/poky/scripts/lib/devtool/build_sdk.py
new file mode 100644
index 000000000..b89d65b0c
--- /dev/null
+++ b/poky/scripts/lib/devtool/build_sdk.py
@@ -0,0 +1,65 @@
+# Development tool - build-sdk command plugin
+#
+# Copyright (C) 2015-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import subprocess
+import logging
+import glob
+import shutil
+import errno
+import sys
+import tempfile
+from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+from devtool import build_image
+
+logger = logging.getLogger('devtool')
+
+
+def build_sdk(args, config, basepath, workspace):
+ """Entry point for the devtool build-sdk command"""
+
+ sdk_targets = config.get('SDK', 'sdk_targets', '').split()
+ if sdk_targets:
+ image = sdk_targets[0]
+ else:
+ raise DevtoolError('Unable to determine image to build SDK for')
+
+ extra_append = ['SDK_DERIVATIVE = "1"']
+ try:
+ result, outputdir = build_image.build_image_task(config,
+ basepath,
+ workspace,
+ image,
+ task='populate_sdk_ext',
+ extra_append=extra_append)
+ except build_image.TargetNotImageError:
+ raise DevtoolError('Unable to determine image to build SDK for')
+
+ if result == 0:
+ logger.info('Successfully built SDK. You can find output files in %s'
+ % outputdir)
+ return result
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands"""
+ if context.fixed_setup:
+ parser_build_sdk = subparsers.add_parser('build-sdk',
+ help='Build a derivative SDK of this one',
+ description='Builds an extensible SDK based upon this one and the items in your workspace',
+ group='advanced')
+ parser_build_sdk.set_defaults(func=build_sdk)
diff --git a/poky/scripts/lib/devtool/deploy.py b/poky/scripts/lib/devtool/deploy.py
new file mode 100644
index 000000000..52e261d56
--- /dev/null
+++ b/poky/scripts/lib/devtool/deploy.py
@@ -0,0 +1,350 @@
+# Development tool - deploy/undeploy command plugin
+#
+# Copyright (C) 2014-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool plugin containing the deploy subcommands"""
+
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+
+import bb.utils
+import argparse_oe
+import oe.types
+
+from devtool import exec_fakeroot, setup_tinfoil, check_workspace_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+deploylist_path = '/.devtool'
+
+def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=False, nopreserve=False, nocheckspace=False):
+ """
+ Prepare a shell script for running on the target to
+ deploy/undeploy files. We have to be careful what we put in this
+ script - only commands that are likely to be available on the
+ target are suitable (the target might be constrained, e.g. using
+ busybox rather than bash with coreutils).
+ """
+ lines = []
+ lines.append('#!/bin/sh')
+ lines.append('set -e')
+ if undeployall:
+ # Yes, I know this is crude - but it does work
+ lines.append('for entry in %s/*.list; do' % deploylist_path)
+ lines.append('[ ! -f $entry ] && exit')
+ lines.append('set `basename $entry | sed "s/.list//"`')
+ if dryrun:
+ if not deploy:
+ lines.append('echo "Previously deployed files for $1:"')
+ lines.append('manifest="%s/$1.list"' % deploylist_path)
+ lines.append('preservedir="%s/$1.preserve"' % deploylist_path)
+ lines.append('if [ -f $manifest ] ; then')
+ # Read manifest in reverse and delete files / remove empty dirs
+ lines.append(' sed \'1!G;h;$!d\' $manifest | while read file')
+ lines.append(' do')
+ if dryrun:
+ lines.append(' if [ ! -d $file ] ; then')
+ lines.append(' echo $file')
+ lines.append(' fi')
+ else:
+ lines.append(' if [ -d $file ] ; then')
+ # Avoid deleting a preserved directory in case it has special perms
+ lines.append(' if [ ! -d $preservedir/$file ] ; then')
+ lines.append(' rmdir $file > /dev/null 2>&1 || true')
+ lines.append(' fi')
+ lines.append(' else')
+ lines.append(' rm -f $file')
+ lines.append(' fi')
+ lines.append(' done')
+ if not dryrun:
+ lines.append(' rm $manifest')
+ if not deploy and not dryrun:
+ # May as well remove all traces
+ lines.append(' rmdir `dirname $manifest` > /dev/null 2>&1 || true')
+ lines.append('fi')
+
+ if deploy:
+ if not nocheckspace:
+ # Check for available space
+ # FIXME This doesn't take into account files spread across multiple
+ # partitions, but doing that is non-trivial
+ # Find the part of the destination path that exists
+ lines.append('checkpath="$2"')
+ lines.append('while [ "$checkpath" != "/" ] && [ ! -e $checkpath ]')
+ lines.append('do')
+ lines.append(' checkpath=`dirname "$checkpath"`')
+ lines.append('done')
+ lines.append(r'freespace=$(df -P $checkpath | sed -nre "s/^(\S+\s+){3}([0-9]+).*/\2/p")')
+            # The first line of the file is the total size to be deployed (in kiB)
+ lines.append('total=`head -n1 $3`')
+ lines.append('if [ $total -gt $freespace ] ; then')
+ lines.append(' echo "ERROR: insufficient space on target (available ${freespace}, needed ${total})"')
+ lines.append(' exit 1')
+ lines.append('fi')
+ if not nopreserve:
+ # Preserve any files that exist. Note that this will add to the
+ # preserved list with successive deployments if the list of files
+ # deployed changes, but because we've deleted any previously
+ # deployed files at this point it will never preserve anything
+ # that was deployed, only files that existed prior to any deploying
+ # (which makes the most sense)
+ lines.append('cat $3 | sed "1d" | while read file fsize')
+ lines.append('do')
+ lines.append(' if [ -e $file ] ; then')
+ lines.append(' dest="$preservedir/$file"')
+ lines.append(' mkdir -p `dirname $dest`')
+ lines.append(' mv $file $dest')
+ lines.append(' fi')
+ lines.append('done')
+ lines.append('rm $3')
+ lines.append('mkdir -p `dirname $manifest`')
+ lines.append('mkdir -p $2')
+ if verbose:
+ lines.append(' tar xv -C $2 -f - | tee $manifest')
+ else:
+ lines.append(' tar xv -C $2 -f - > $manifest')
+ lines.append('sed -i "s!^./!$2!" $manifest')
+ elif not dryrun:
+ # Put any preserved files back
+ lines.append('if [ -d $preservedir ] ; then')
+ lines.append(' cd $preservedir')
+ # find from busybox might not have -exec, so we don't use that
+ lines.append(' find . -type f | while read file')
+ lines.append(' do')
+ lines.append(' mv $file /$file')
+ lines.append(' done')
+ lines.append(' cd /')
+ lines.append(' rm -rf $preservedir')
+ lines.append('fi')
+
+ if undeployall:
+ if not dryrun:
+ lines.append('echo "NOTE: Successfully undeployed $1"')
+ lines.append('done')
+
+ # Delete the script itself
+ lines.append('rm $0')
+ lines.append('')
+
+ return '\n'.join(lines)
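Because _prepare_remote_script only assembles a string, the generated target script can
be inspected in isolation. A minimal sketch, assuming bitbake's lib/ and poky's
scripts/lib directories are on sys.path so the plugin imports as devtool.deploy:

    from devtool.deploy import _prepare_remote_script

    # Show the shell script that would run on the target for a dry-run undeploy
    print(_prepare_remote_script(deploy=False, dryrun=True))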
+
+
+
+def deploy(args, config, basepath, workspace):
+ """Entry point for the devtool 'deploy' subcommand"""
+ import math
+ import oe.recipeutils
+ import oe.package
+
+ check_workspace_recipe(workspace, args.recipename, checksrc=False)
+
+ try:
+ host, destdir = args.target.split(':')
+ except ValueError:
+ destdir = '/'
+ else:
+ args.target = host
+ if not destdir.endswith('/'):
+ destdir += '/'
+
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ try:
+ rd = tinfoil.parse_recipe(args.recipename)
+ except Exception as e:
+ raise DevtoolError('Exception parsing recipe %s: %s' %
+ (args.recipename, e))
+ recipe_outdir = rd.getVar('D')
+ if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
+ raise DevtoolError('No files to deploy - have you built the %s '
+ 'recipe? If so, the install step has not installed '
+ 'any files.' % args.recipename)
+
+ if args.strip and not args.dry_run:
+ # Fakeroot copy to new destination
+ srcdir = recipe_outdir
+ recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'deploy-target-stripped')
+ if os.path.isdir(recipe_outdir):
+ bb.utils.remove(recipe_outdir, True)
+ exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
+ os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or ''])
+ oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'),
+ rd.getVar('base_libdir'))
+
+ filelist = []
+ ftotalsize = 0
+ for root, _, files in os.walk(recipe_outdir):
+ for fn in files:
+ # Get the size in kiB (since we'll be comparing it to the output of du -k)
+ # MUST use lstat() here not stat() or getfilesize() since we don't want to
+ # dereference symlinks
+ fsize = int(math.ceil(float(os.lstat(os.path.join(root, fn)).st_size)/1024))
+ ftotalsize += fsize
+ # The path as it would appear on the target
+ fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
+ filelist.append((fpath, fsize))
+
+ if args.dry_run:
+ print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
+ for item, _ in filelist:
+ print(' %s' % item)
+ return 0
+
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
+
+ scp_port = ''
+ ssh_port = ''
+ if args.port:
+ scp_port = "-P %s" % args.port
+ ssh_port = "-p %s" % args.port
+
+ # In order to delete previously deployed files and have the manifest file on
+ # the target, we write out a shell script and then copy it to the target
+ # so we can then run it (piping tar output to it).
+ # (We cannot use scp here, because it doesn't preserve symlinks.)
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_deploy.sh'
+ tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
+ shellscript = _prepare_remote_script(deploy=True,
+ verbose=args.show_status,
+ nopreserve=args.no_preserve,
+ nocheckspace=args.no_check_space)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Write out the file list
+ with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
+ f.write('%d\n' % ftotalsize)
+ for fpath, fsize in filelist:
+ f.write('%s %d\n' % (fpath, fsize))
+ # Copy them to the target
+ ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ # Now run the script
+ ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s %s \'sh %s %s %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ if ret != 0:
+ raise DevtoolError('Deploy failed - rerun with -s to get a complete '
+ 'error message')
+
+ logger.info('Successfully deployed %s' % recipe_outdir)
+
+ files_list = []
+ for root, _, files in os.walk(recipe_outdir):
+ for filename in files:
+ filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
+ files_list.append(os.path.join(destdir, filename))
+ finally:
+ tinfoil.shutdown()
+
+ return 0
+
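For reference, the devtool_deploy.list file written above holds the total size in kiB on
the first line, followed by one "<path> <size>" entry per file; with hypothetical paths
it would look like:

    234
    /usr/bin/foo 56
    /usr/lib/libfoo.so.1.2.3 178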
+def undeploy(args, config, basepath, workspace):
+ """Entry point for the devtool 'undeploy' subcommand"""
+ if args.all and args.recipename:
+ raise argparse_oe.ArgumentUsageError('Cannot specify -a/--all with a recipe name', 'undeploy-target')
+ elif not args.recipename and not args.all:
+ raise argparse_oe.ArgumentUsageError('If you don\'t specify a recipe, you must specify -a/--all', 'undeploy-target')
+
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
+
+ scp_port = ''
+ ssh_port = ''
+ if args.port:
+ scp_port = "-P %s" % args.port
+ ssh_port = "-p %s" % args.port
+
+ args.target = args.target.split(':')[0]
+
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_undeploy.sh'
+ shellscript = _prepare_remote_script(deploy=False, dryrun=args.dry_run, undeployall=args.all)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Copy it to the target
+ ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ # Now run the script
+ ret = subprocess.call('ssh %s %s %s \'sh %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
+ if ret != 0:
+ raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
+ 'error message')
+
+ if not args.all and not args.dry_run:
+ logger.info('Successfully undeployed %s' % args.recipename)
+ return 0
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the deploy plugin"""
+
+ parser_deploy = subparsers.add_parser('deploy-target',
+ help='Deploy recipe output files to live target machine',
+ description='Deploys a recipe\'s build output (i.e. the output of the do_install task) to a live target machine over ssh. By default, any existing files will be preserved instead of being overwritten and will be restored if you run devtool undeploy-target. Note: this only deploys the recipe itself and not any runtime dependencies, so it is assumed that those have been installed on the target beforehand.',
+ group='testbuild')
+ parser_deploy.add_argument('recipename', help='Recipe to deploy')
+ parser_deploy.add_argument('target', help='Live target machine running an ssh server: user@hostname[:destdir]')
+ parser_deploy.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
+ parser_deploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
+ parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
+ parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
+ parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_deploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+
+ strip_opts = parser_deploy.add_mutually_exclusive_group(required=False)
+ strip_opts.add_argument('-S', '--strip',
+ help='Strip executables prior to deploying (default: %(default)s). '
+ 'The default value of this option can be controlled by setting the strip option in the [Deploy] section to True or False.',
+ default=oe.types.boolean(context.config.get('Deploy', 'strip', default='0')),
+ action='store_true')
+ strip_opts.add_argument('--no-strip', help='Do not strip executables prior to deploy', dest='strip', action='store_false')
+
+ parser_deploy.set_defaults(func=deploy)
+
+ parser_undeploy = subparsers.add_parser('undeploy-target',
+ help='Undeploy recipe output files in live target machine',
+ description='Un-deploys recipe output files previously deployed to a live target machine by devtool deploy-target.',
+ group='testbuild')
+ parser_undeploy.add_argument('recipename', help='Recipe to undeploy (if not using -a/--all)', nargs='?')
+ parser_undeploy.add_argument('target', help='Live target machine running an ssh server: user@hostname')
+ parser_undeploy.add_argument('-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
+ parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
+ parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
+ parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+ parser_undeploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_undeploy.set_defaults(func=undeploy)
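Typical invocations of the two commands registered above (target address hypothetical):

    devtool deploy-target -s htop root@192.168.7.2
    devtool undeploy-target -s htop root@192.168.7.2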
diff --git a/poky/scripts/lib/devtool/export.py b/poky/scripts/lib/devtool/export.py
new file mode 100644
index 000000000..13ee258e7
--- /dev/null
+++ b/poky/scripts/lib/devtool/export.py
@@ -0,0 +1,119 @@
+# Development tool - export command plugin
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool export plugin"""
+
+import os
+import argparse
+import tarfile
+import logging
+import datetime
+import json
+
+logger = logging.getLogger('devtool')
+
+# output files
+default_arcname_prefix = "workspace-export"
+metadata = '.export_metadata'
+
+def export(args, config, basepath, workspace):
+ """Entry point for the devtool 'export' subcommand"""
+
+ def add_metadata(tar):
+ """Archive the workspace object"""
+ # finally store the workspace metadata
+ with open(metadata, 'w') as fd:
+ fd.write(json.dumps((config.workspace_path, workspace)))
+ tar.add(metadata)
+ os.unlink(metadata)
+
+ def add_recipe(tar, recipe, data):
+ """Archive recipe with proper arcname"""
+ # Create a map of name/arcnames
+ arcnames = []
+ for key, name in data.items():
+ if name:
+ if key == 'srctree':
+                    # all sources, no matter where they are located, go into the sources directory
+ arcname = 'sources/%s' % recipe
+ else:
+ arcname = name.replace(config.workspace_path, '')
+ arcnames.append((name, arcname))
+
+ for name, arcname in arcnames:
+ tar.add(name, arcname=arcname)
+
+
+    # Make sure the workspace is non-empty and that any recipes listed via --include/--exclude are in the workspace
+ if not workspace:
+ logger.info('Workspace contains no recipes, nothing to export')
+ return 0
+ else:
+ for param, recipes in {'include':args.include,'exclude':args.exclude}.items():
+ for recipe in recipes:
+ if recipe not in workspace:
+                    logger.error('Recipe (%s) given in the %s argument is not in the current workspace' % (recipe, param))
+ return 1
+
+ name = args.file
+
+ default_name = "%s-%s.tar.gz" % (default_arcname_prefix, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
+ if not name:
+ name = default_name
+ else:
+ # if name is a directory, append the default name
+ if os.path.isdir(name):
+ name = os.path.join(name, default_name)
+
+ if os.path.exists(name) and not args.overwrite:
+        logger.error('Tar archive %s exists. Use --overwrite/-o to overwrite it' % name)
+ return 1
+
+    # if the whole workspace is excluded, quit
+    if not set(workspace.keys()).difference(set(args.exclude)):
+ logger.warn('All recipes in workspace excluded, nothing to export')
+ return 0
+
+ exported = []
+ with tarfile.open(name, 'w:gz') as tar:
+ if args.include:
+ for recipe in args.include:
+ add_recipe(tar, recipe, workspace[recipe])
+ exported.append(recipe)
+ else:
+ for recipe, data in workspace.items():
+ if recipe not in args.exclude:
+ add_recipe(tar, recipe, data)
+ exported.append(recipe)
+
+ add_metadata(tar)
+
+ logger.info('Tar archive created at %s with the following recipes: %s' % (name, ', '.join(exported)))
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool export subcommands"""
+ parser = subparsers.add_parser('export',
+ help='Export workspace into a tar archive',
+ description='Export one or more recipes from current workspace into a tar archive',
+ group='advanced')
+
+ parser.add_argument('--file', '-f', help='Output archive file name')
+ parser.add_argument('--overwrite', '-o', action="store_true", help='Overwrite previous export tar archive')
+ group = parser.add_mutually_exclusive_group()
+    group.add_argument('--include', '-i', nargs='+', default=[], help='Include only the specified recipes in the tar archive')
+    group.add_argument('--exclude', '-e', nargs='+', default=[], help='Exclude the specified recipes from the tar archive')
+ parser.set_defaults(func=export)
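A typical run of the export command registered above, together with the archive layout
the add_recipe/add_metadata helpers produce (recipe name hypothetical):

    devtool export -f /tmp/workspace-export.tar.gz

    # Archive contents:
    #   appends/foo_1.0.bbappend
    #   recipes/foo/foo_1.0.bb
    #   sources/foo/...
    #   .export_metadata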
diff --git a/poky/scripts/lib/devtool/import.py b/poky/scripts/lib/devtool/import.py
new file mode 100644
index 000000000..c13a180d1
--- /dev/null
+++ b/poky/scripts/lib/devtool/import.py
@@ -0,0 +1,144 @@
+# Development tool - import command plugin
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool import plugin"""
+
+import os
+import tarfile
+import logging
+import collections
+import json
+import fnmatch
+
+from devtool import standard, setup_tinfoil, replace_from_file, DevtoolError
+from devtool import export
+
+logger = logging.getLogger('devtool')
+
+def devimport(args, config, basepath, workspace):
+ """Entry point for the devtool 'import' subcommand"""
+
+ def get_pn(name):
+ """ Returns the filename of a workspace recipe/append"""
+ metadata = name.split('/')[-1]
+ fn, _ = os.path.splitext(metadata)
+ return fn
+
+ if not os.path.exists(args.file):
+ raise DevtoolError('Tar archive %s does not exist. Export your workspace using "devtool export"' % args.file)
+
+ with tarfile.open(args.file) as tar:
+ # Get exported metadata
+ export_workspace_path = export_workspace = None
+ try:
+ metadata = tar.getmember(export.metadata)
+        except KeyError:
+ raise DevtoolError('The export metadata file created by "devtool export" was not found. "devtool import" can only be used to import tar archives created by "devtool export".')
+
+ tar.extract(metadata)
+ with open(metadata.name) as fdm:
+ export_workspace_path, export_workspace = json.load(fdm)
+ os.unlink(metadata.name)
+
+ members = tar.getmembers()
+
+        # Get appends and recipes from the exported archive; these
+        # will be needed to find appends that have no corresponding
+        # recipe
+ append_fns, recipe_fns = set(), set()
+ for member in members:
+ if member.name.startswith('appends'):
+ append_fns.add(get_pn(member.name))
+ elif member.name.startswith('recipes'):
+ recipe_fns.add(get_pn(member.name))
+
+ # Setup tinfoil, get required data and shutdown
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+        current_fns = [os.path.basename(fn) for fn in tinfoil.cooker.recipecaches[''].pkg_fn]
+ finally:
+ tinfoil.shutdown()
+
+ # Find those appends that do not have recipes in current metadata
+ non_importables = []
+ for fn in append_fns - recipe_fns:
+ # Check on current metadata (covering those layers indicated in bblayers.conf)
+ for current_fn in current_fns:
+ if fnmatch.fnmatch(current_fn, '*' + fn.replace('%', '') + '*'):
+ break
+ else:
+ non_importables.append(fn)
+            logger.warn('No recipe to append %s.bbappend to, skipping' % fn)
+
+ # Extract
+ imported = []
+ for member in members:
+ if member.name == export.metadata:
+ continue
+
+ for nonimp in non_importables:
+ pn = nonimp.split('_')[0]
+ # do not extract data from non-importable recipes or metadata
+ if member.name.startswith('appends/%s' % nonimp) or \
+ member.name.startswith('recipes/%s' % nonimp) or \
+ member.name.startswith('sources/%s' % pn):
+ break
+ else:
+ path = os.path.join(config.workspace_path, member.name)
+ if os.path.exists(path):
+ # by default, no file overwrite is done unless -o is given by the user
+ if args.overwrite:
+ try:
+ tar.extract(member, path=config.workspace_path)
+ except PermissionError as pe:
+ logger.warn(pe)
+ else:
+ logger.warn('File already present. Use --overwrite/-o to overwrite it: %s' % member.name)
+ continue
+ else:
+ tar.extract(member, path=config.workspace_path)
+
+ # Update EXTERNALSRC and the devtool md5 file
+ if member.name.startswith('appends'):
+ if export_workspace_path:
+ # appends created by 'devtool modify' just need to update the workspace
+ replace_from_file(path, export_workspace_path, config.workspace_path)
+
+ # appends created by 'devtool add' need replacement of exported source tree
+ pn = get_pn(member.name).split('_')[0]
+ exported_srctree = export_workspace[pn]['srctree']
+ if exported_srctree:
+ replace_from_file(path, exported_srctree, os.path.join(config.workspace_path, 'sources', pn))
+
+ standard._add_md5(config, pn, path)
+ imported.append(pn)
+
+ if imported:
+ logger.info('Imported recipes into workspace %s: %s' % (config.workspace_path, ', '.join(imported)))
+ else:
+ logger.warn('No recipes imported into the workspace')
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool import subcommands"""
+ parser = subparsers.add_parser('import',
+ help='Import exported tar archive into workspace',
+ description='Import tar archive previously created by "devtool export" into workspace',
+ group='advanced')
+ parser.add_argument('file', metavar='FILE', help='Name of the tar archive to import')
+ parser.add_argument('--overwrite', '-o', action="store_true", help='Overwrite files when extracting')
+ parser.set_defaults(func=devimport)
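Round-tripping the archive produced by "devtool export" on another machine is then
simply (file name hypothetical):

    devtool import /tmp/workspace-export.tar.gz --overwrite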
diff --git a/poky/scripts/lib/devtool/package.py b/poky/scripts/lib/devtool/package.py
new file mode 100644
index 000000000..af9e8f15f
--- /dev/null
+++ b/poky/scripts/lib/devtool/package.py
@@ -0,0 +1,60 @@
+# Development tool - package command plugin
+#
+# Copyright (C) 2014-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool plugin containing the package subcommands"""
+
+import os
+import subprocess
+import logging
+from bb.process import ExecutionError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def package(args, config, basepath, workspace):
+ """Entry point for the devtool 'package' subcommand"""
+ check_workspace_recipe(workspace, args.recipename)
+
+ tinfoil = setup_tinfoil(basepath=basepath, config_only=True)
+ try:
+ image_pkgtype = config.get('Package', 'image_pkgtype', '')
+ if not image_pkgtype:
+ image_pkgtype = tinfoil.config_data.getVar('IMAGE_PKGTYPE')
+
+ deploy_dir_pkg = tinfoil.config_data.getVar('DEPLOY_DIR_%s' % image_pkgtype.upper())
+ finally:
+ tinfoil.shutdown()
+
+ package_task = config.get('Package', 'package_task', 'package_write_%s' % image_pkgtype)
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c %s %s' % (package_task, args.recipename), watch=True)
+    except ExecutionError as e:
+ # We've already seen the output since watch=True, so just ensure we return something to the user
+ return e.exitcode
+
+ logger.info('Your packages are in %s' % deploy_dir_pkg)
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the package plugin"""
+ if context.fixed_setup:
+ parser_package = subparsers.add_parser('package',
+ help='Build packages for a recipe',
+ description='Builds packages for a recipe\'s output files',
+ group='testbuild', order=-5)
+ parser_package.add_argument('recipename', help='Recipe to package')
+ parser_package.set_defaults(func=package)
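Since the command is only registered when context.fixed_setup is set, it is used from
within an extensible SDK (recipe name hypothetical):

    devtool package htop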
diff --git a/poky/scripts/lib/devtool/runqemu.py b/poky/scripts/lib/devtool/runqemu.py
new file mode 100644
index 000000000..e26cf28c2
--- /dev/null
+++ b/poky/scripts/lib/devtool/runqemu.py
@@ -0,0 +1,74 @@
+# Development tool - runqemu command plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool runqemu plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import glob
+from devtool import exec_build_env_command, setup_tinfoil, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def runqemu(args, config, basepath, workspace):
+ """Entry point for the devtool 'runqemu' subcommand"""
+
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ machine = tinfoil.config_data.getVar('MACHINE')
+ bindir_native = os.path.join(tinfoil.config_data.getVar('STAGING_DIR'),
+ tinfoil.config_data.getVar('BUILD_ARCH'),
+ tinfoil.config_data.getVar('bindir_native').lstrip(os.path.sep))
+ finally:
+ tinfoil.shutdown()
+
+ if not glob.glob(os.path.join(bindir_native, 'qemu-system-*')):
+ raise DevtoolError('QEMU is not available within this SDK')
+
+ imagename = args.imagename
+ if not imagename:
+ sdk_targets = config.get('SDK', 'sdk_targets', '').split()
+ if sdk_targets:
+ imagename = sdk_targets[0]
+ if not imagename:
+ raise DevtoolError('Unable to determine image name to run, please specify one')
+
+ try:
+ # FIXME runqemu assumes that if OECORE_NATIVE_SYSROOT is set then it shouldn't
+ # run bitbake to find out the values of various environment variables, which
+ # isn't the case for the extensible SDK. Work around it for now.
+ newenv = dict(os.environ)
+ newenv.pop('OECORE_NATIVE_SYSROOT', '')
+ exec_build_env_command(config.init_path, basepath, 'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)), watch=True, env=newenv)
+ except bb.process.ExecutionError as e:
+ # We've already seen the output since watch=True, so just ensure we return something to the user
+ return e.exitcode
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ if context.fixed_setup:
+ parser_runqemu = subparsers.add_parser('runqemu', help='Run QEMU on the specified image',
+ description='Runs QEMU to boot the specified image',
+ group='testbuild', order=-20)
+ parser_runqemu.add_argument('imagename', help='Name of built image to boot within QEMU', nargs='?')
+ parser_runqemu.add_argument('args', help='Any remaining arguments are passed to the runqemu script (pass --help after imagename to see what these are)',
+ nargs=argparse.REMAINDER)
+ parser_runqemu.set_defaults(func=runqemu)
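Everything after the image name is handed straight through to the runqemu script, so a
typical invocation inside the SDK might be:

    devtool runqemu core-image-minimal nographic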
diff --git a/poky/scripts/lib/devtool/sdk.py b/poky/scripts/lib/devtool/sdk.py
new file mode 100644
index 000000000..f46577c2a
--- /dev/null
+++ b/poky/scripts/lib/devtool/sdk.py
@@ -0,0 +1,336 @@
+# Development tool - sdk-update command plugin
+#
+# Copyright (C) 2015-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import subprocess
+import logging
+import glob
+import shutil
+import errno
+import sys
+import tempfile
+import re
+from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def parse_locked_sigs(sigfile_path):
+ """Return <pn:task>:<hash> dictionary"""
+ sig_dict = {}
+ with open(sigfile_path) as f:
+ lines = f.readlines()
+ for line in lines:
+ if ':' in line:
+ taskkey, _, hashval = line.rpartition(':')
+ sig_dict[taskkey.strip()] = hashval.split()[0]
+ return sig_dict
+
+def generate_update_dict(sigfile_new, sigfile_old):
+ """Return a dict containing <pn:task>:<hash> which indicates what need to be updated"""
+ update_dict = {}
+ sigdict_new = parse_locked_sigs(sigfile_new)
+ sigdict_old = parse_locked_sigs(sigfile_old)
+ for k in sigdict_new:
+ if k not in sigdict_old:
+ update_dict[k] = sigdict_new[k]
+ continue
+ if sigdict_new[k] != sigdict_old[k]:
+ update_dict[k] = sigdict_new[k]
+ continue
+ return update_dict
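A quick sketch of how the two helpers above combine, using throwaway files and made-up
hashes (assumes the helpers are in scope, e.g. the module imported as devtool.sdk):

    import tempfile

    def write_sigs(entries):
        # Mimic the indented '<pn:task>:<hash>' lines found in locked-sigs.inc
        f = tempfile.NamedTemporaryFile('w', suffix='.inc', delete=False)
        for taskkey, hashval in entries:
            f.write('    %s:%s \\\n' % (taskkey, hashval))
        f.close()
        return f.name

    old = write_sigs([('m4:do_populate_sysroot', 'aaaa')])
    new = write_sigs([('m4:do_populate_sysroot', 'bbbb'),
                      ('gdb:do_populate_sysroot', 'cccc')])
    # One hash changed and one task is new - both end up in the update dict
    print(generate_update_dict(new, old))
    # {'m4:do_populate_sysroot': 'bbbb', 'gdb:do_populate_sysroot': 'cccc'}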
+
+def get_sstate_objects(update_dict, sstate_dir):
+ """Return a list containing sstate objects which are to be installed"""
+ sstate_objects = []
+ for k in update_dict:
+ files = set()
+ hashval = update_dict[k]
+ p = sstate_dir + '/' + hashval[:2] + '/*' + hashval + '*.tgz'
+ files |= set(glob.glob(p))
+ p = sstate_dir + '/*/' + hashval[:2] + '/*' + hashval + '*.tgz'
+ files |= set(glob.glob(p))
+ files = list(files)
+ if len(files) == 1:
+ sstate_objects.extend(files)
+ elif len(files) > 1:
+ logger.error("More than one matching sstate object found for %s" % hashval)
+
+ return sstate_objects
+
+def mkdir(d):
+ try:
+ os.makedirs(d)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise e
+
+def install_sstate_objects(sstate_objects, src_sdk, dest_sdk):
+ """Install sstate objects into destination SDK"""
+ sstate_dir = os.path.join(dest_sdk, 'sstate-cache')
+ if not os.path.exists(sstate_dir):
+ logger.error("Missing sstate-cache directory in %s, it might not be an extensible SDK." % dest_sdk)
+        raise DevtoolError("Missing sstate-cache directory in %s" % dest_sdk)
+ for sb in sstate_objects:
+ dst = sb.replace(src_sdk, dest_sdk)
+ destdir = os.path.dirname(dst)
+ mkdir(destdir)
+ logger.debug("Copying %s to %s" % (sb, dst))
+ shutil.copy(sb, dst)
+
+def check_manifest(fn, basepath):
+ import bb.utils
+ changedfiles = []
+ with open(fn, 'r') as f:
+ for line in f:
+ splitline = line.split()
+ if len(splitline) > 1:
+ chksum = splitline[0]
+ fpath = splitline[1]
+ curr_chksum = bb.utils.sha256_file(os.path.join(basepath, fpath))
+ if chksum != curr_chksum:
+ logger.debug('File %s changed: old csum = %s, new = %s' % (os.path.join(basepath, fpath), curr_chksum, chksum))
+ changedfiles.append(fpath)
+ return changedfiles
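check_manifest expects the sdk-conf-manifest fetched below to contain one
"<sha256> <relative path>" pair per line, e.g. (checksums made up):

    e3b0c44298fc1c149afbf4c8996fb924  conf/local.conf
    7d865e959b2466918c9863afca942d0f  conf/bblayers.conf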
+
+def sdk_update(args, config, basepath, workspace):
+ """Entry point for devtool sdk-update command"""
+ updateserver = args.updateserver
+ if not updateserver:
+ updateserver = config.get('SDK', 'updateserver', '')
+ logger.debug("updateserver: %s" % updateserver)
+
+ # Make sure we are using sdk-update from within SDK
+ logger.debug("basepath = %s" % basepath)
+ old_locked_sig_file_path = os.path.join(basepath, 'conf/locked-sigs.inc')
+ if not os.path.exists(old_locked_sig_file_path):
+ logger.error("Not using devtool's sdk-update command from within an extensible SDK. Please specify correct basepath via --basepath option")
+ return -1
+ else:
+ logger.debug("Found conf/locked-sigs.inc in %s" % basepath)
+
+    if '://' not in updateserver:
+ logger.error("Update server must be a URL")
+ return -1
+
+ layers_dir = os.path.join(basepath, 'layers')
+ conf_dir = os.path.join(basepath, 'conf')
+
+ # Grab variable values
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ stamps_dir = tinfoil.config_data.getVar('STAMPS_DIR')
+ sstate_mirrors = tinfoil.config_data.getVar('SSTATE_MIRRORS')
+ site_conf_version = tinfoil.config_data.getVar('SITE_CONF_VERSION')
+ finally:
+ tinfoil.shutdown()
+
+ tmpsdk_dir = tempfile.mkdtemp()
+ try:
+ os.makedirs(os.path.join(tmpsdk_dir, 'conf'))
+ new_locked_sig_file_path = os.path.join(tmpsdk_dir, 'conf', 'locked-sigs.inc')
+ # Fetch manifest from server
+ tmpmanifest = os.path.join(tmpsdk_dir, 'conf', 'sdk-conf-manifest')
+ ret = subprocess.call("wget -q -O %s %s/conf/sdk-conf-manifest" % (tmpmanifest, updateserver), shell=True)
+ changedfiles = check_manifest(tmpmanifest, basepath)
+ if not changedfiles:
+ logger.info("Already up-to-date")
+ return 0
+ # Update metadata
+ logger.debug("Updating metadata via git ...")
+        # Check the git status before doing a fetch and reset
+ if os.path.exists(os.path.join(basepath, 'layers/.git')):
+ out = subprocess.check_output("git status --porcelain", shell=True, cwd=layers_dir)
+ if not out:
+ ret = subprocess.call("git fetch --all; git reset --hard @{u}", shell=True, cwd=layers_dir)
+ else:
+ logger.error("Failed to update metadata as there have been changes made to it. Aborting.");
+ logger.error("Changed files:\n%s" % out);
+ return -1
+ else:
+ ret = -1
+ if ret != 0:
+ ret = subprocess.call("git clone %s/layers/.git" % updateserver, shell=True, cwd=tmpsdk_dir)
+ if ret != 0:
+ logger.error("Updating metadata via git failed")
+ return ret
+ logger.debug("Updating conf files ...")
+ for changedfile in changedfiles:
+ ret = subprocess.call("wget -q -O %s %s/%s" % (changedfile, updateserver, changedfile), shell=True, cwd=tmpsdk_dir)
+ if ret != 0:
+ logger.error("Updating %s failed" % changedfile)
+ return ret
+
+ # Check if UNINATIVE_CHECKSUM changed
+ uninative = False
+ if 'conf/local.conf' in changedfiles:
+ def read_uninative_checksums(fn):
+ chksumitems = []
+ with open(fn, 'r') as f:
+ for line in f:
+ if line.startswith('UNINATIVE_CHECKSUM'):
+ splitline = re.split(r'[\[\]"\']', line)
+ if len(splitline) > 3:
+ chksumitems.append((splitline[1], splitline[3]))
+ return chksumitems
+
+ oldsums = read_uninative_checksums(os.path.join(basepath, 'conf/local.conf'))
+ newsums = read_uninative_checksums(os.path.join(tmpsdk_dir, 'conf/local.conf'))
+ if oldsums != newsums:
+ uninative = True
+ for buildarch, chksum in newsums:
+ uninative_file = os.path.join('downloads', 'uninative', chksum, '%s-nativesdk-libc.tar.bz2' % buildarch)
+ mkdir(os.path.join(tmpsdk_dir, os.path.dirname(uninative_file)))
+ ret = subprocess.call("wget -q -O %s %s/%s" % (uninative_file, updateserver, uninative_file), shell=True, cwd=tmpsdk_dir)
+
+ # Ok, all is well at this point - move everything over
+ tmplayers_dir = os.path.join(tmpsdk_dir, 'layers')
+ if os.path.exists(tmplayers_dir):
+ shutil.rmtree(layers_dir)
+ shutil.move(tmplayers_dir, layers_dir)
+ for changedfile in changedfiles:
+ destfile = os.path.join(basepath, changedfile)
+ os.remove(destfile)
+ shutil.move(os.path.join(tmpsdk_dir, changedfile), destfile)
+ os.remove(os.path.join(conf_dir, 'sdk-conf-manifest'))
+ shutil.move(tmpmanifest, conf_dir)
+ if uninative:
+ shutil.rmtree(os.path.join(basepath, 'downloads', 'uninative'))
+ shutil.move(os.path.join(tmpsdk_dir, 'downloads', 'uninative'), os.path.join(basepath, 'downloads'))
+
+ if not sstate_mirrors:
+ with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
+ f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
+ f.write('SSTATE_MIRRORS_append = " file://.* %s/sstate-cache/PATH \\n "\n' % updateserver)
+ finally:
+ shutil.rmtree(tmpsdk_dir)
+
+ if not args.skip_prepare:
+ # Find all potentially updateable tasks
+ sdk_update_targets = []
+ tasks = ['do_populate_sysroot', 'do_packagedata']
+ for root, _, files in os.walk(stamps_dir):
+ for fn in files:
+                if '.sigdata.' not in fn:
+ for task in tasks:
+ if '.%s.' % task in fn or '.%s_setscene.' % task in fn:
+ sdk_update_targets.append('%s:%s' % (os.path.basename(root), task))
+ # Run bitbake command for the whole SDK
+ logger.info("Preparing build system... (This may take some time.)")
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake --setscene-only %s' % ' '.join(sdk_update_targets), stderr=subprocess.STDOUT)
+ output, _ = exec_build_env_command(config.init_path, basepath, 'bitbake -n %s' % ' '.join(sdk_update_targets), stderr=subprocess.STDOUT)
+ runlines = []
+ for line in output.splitlines():
+ if 'Running task ' in line:
+ runlines.append(line)
+ if runlines:
+ logger.error('Unexecuted tasks found in preparation log:\n %s' % '\n '.join(runlines))
+ return -1
+ except bb.process.ExecutionError as e:
+ logger.error('Preparation failed:\n%s' % e.stdout)
+ return -1
+ return 0
+
+def sdk_install(args, config, basepath, workspace):
+ """Entry point for the devtool sdk-install command"""
+
+ import oe.recipeutils
+ import bb.process
+
+ for recipe in args.recipename:
+ if recipe in workspace:
+            raise DevtoolError('%s is a workspace recipe and therefore cannot be installed with sdk-install' % recipe)
+
+ tasks = ['do_populate_sysroot', 'do_packagedata']
+ stampprefixes = {}
+ def checkstamp(recipe):
+ stampprefix = stampprefixes[recipe]
+ stamps = glob.glob(stampprefix + '*')
+ for stamp in stamps:
+ if '.sigdata.' not in stamp and stamp.startswith((stampprefix + '.', stampprefix + '_setscene.')):
+ return True
+ else:
+ return False
+
+ install_recipes = []
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ for recipe in args.recipename:
+ rd = parse_recipe(config, tinfoil, recipe, True)
+ if not rd:
+ return 1
+ stampprefixes[recipe] = '%s.%s' % (rd.getVar('STAMP'), tasks[0])
+ if checkstamp(recipe):
+ logger.info('%s is already installed' % recipe)
+ else:
+ install_recipes.append(recipe)
+ finally:
+ tinfoil.shutdown()
+
+ if install_recipes:
+ logger.info('Installing %s...' % ', '.join(install_recipes))
+ install_tasks = []
+ for recipe in install_recipes:
+ for task in tasks:
+ if recipe.endswith('-native') and 'package' in task:
+ continue
+ install_tasks.append('%s:%s' % (recipe, task))
+ options = ''
+ if not args.allow_build:
+ options += ' --setscene-only'
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake %s %s' % (options, ' '.join(install_tasks)), watch=True)
+ except bb.process.ExecutionError as e:
+ raise DevtoolError('Failed to install %s:\n%s' % (recipe, str(e)))
+ failed = False
+ for recipe in install_recipes:
+ if checkstamp(recipe):
+ logger.info('Successfully installed %s' % recipe)
+ else:
+            logger.error('Failed to install %s - unavailable' % recipe)
+            failed = True
+ if failed:
+ return 2
+
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots', watch=True)
+ except bb.process.ExecutionError as e:
+ raise DevtoolError('Failed to bitbake build-sysroots:\n%s' % (str(e)))
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from the sdk plugin"""
+ if context.fixed_setup:
+ parser_sdk = subparsers.add_parser('sdk-update',
+ help='Update SDK components',
+ description='Updates installed SDK components from a remote server',
+ group='sdk')
+ updateserver = context.config.get('SDK', 'updateserver', '')
+ if updateserver:
+ parser_sdk.add_argument('updateserver', help='The update server to fetch latest SDK components from (default %s)' % updateserver, nargs='?')
+ else:
+ parser_sdk.add_argument('updateserver', help='The update server to fetch latest SDK components from')
+ parser_sdk.add_argument('--skip-prepare', action="store_true", help='Skip re-preparing the build system after updating (for debugging only)')
+ parser_sdk.set_defaults(func=sdk_update)
+
+ parser_sdk_install = subparsers.add_parser('sdk-install',
+ help='Install additional SDK components',
+ description='Installs additional recipe development files into the SDK. (You can use "devtool search" to find available recipes.)',
+ group='sdk')
+ parser_sdk_install.add_argument('recipename', help='Name of the recipe to install the development artifacts for', nargs='+')
+ parser_sdk_install.add_argument('-s', '--allow-build', help='Allow building requested item(s) from source', action='store_true')
+ parser_sdk_install.set_defaults(func=sdk_install)
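Typical usage of the two SDK commands registered above, with a hypothetical update
server URL:

    devtool sdk-update http://sdk.example.com/updates
    devtool sdk-install -s gdb strace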
diff --git a/poky/scripts/lib/devtool/search.py b/poky/scripts/lib/devtool/search.py
new file mode 100644
index 000000000..b4f209b7e
--- /dev/null
+++ b/poky/scripts/lib/devtool/search.py
@@ -0,0 +1,118 @@
+# Development tool - search command plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool search plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import re
+from devtool import setup_tinfoil, parse_recipe, DevtoolError
+
+logger = logging.getLogger('devtool')
+
+def search(args, config, basepath, workspace):
+ """Entry point for the devtool 'search' subcommand"""
+
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ defsummary = tinfoil.config_data.getVar('SUMMARY', False) or ''
+
+ keyword_rc = re.compile(args.keyword)
+
+ def print_match(pn):
+ rd = parse_recipe(config, tinfoil, pn, True)
+ if not rd:
+ return
+ summary = rd.getVar('SUMMARY')
+ if summary == rd.expand(defsummary):
+ summary = ''
+ print("%s %s" % (pn.ljust(20), summary))
+
+
+ matches = []
+ if os.path.exists(pkgdata_dir):
+ for fn in os.listdir(pkgdata_dir):
+ pfn = os.path.join(pkgdata_dir, fn)
+ if not os.path.isfile(pfn):
+ continue
+
+ packages = []
+ match = False
+ if keyword_rc.search(fn):
+ match = True
+
+ if not match:
+ with open(pfn, 'r') as f:
+ for line in f:
+ if line.startswith('PACKAGES:'):
+ packages = line.split(':', 1)[1].strip().split()
+
+ for pkg in packages:
+ if keyword_rc.search(pkg):
+ match = True
+ break
+ if os.path.exists(os.path.join(pkgdata_dir, 'runtime', pkg + '.packaged')):
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
+ for line in f:
+ if ': ' in line:
+ splitline = line.split(':', 1)
+ key = splitline[0]
+ value = splitline[1].strip()
+ if key in ['PKG_%s' % pkg, 'DESCRIPTION', 'FILES_INFO'] or key.startswith('FILERPROVIDES_'):
+ if keyword_rc.search(value):
+ match = True
+ break
+ if match:
+ print_match(fn)
+ matches.append(fn)
+ else:
+ logger.warning('Package data is not available, results may be limited')
+
+ for recipe in tinfoil.all_recipes():
+ if args.fixed_setup and 'nativesdk' in recipe.inherits():
+ continue
+
+ match = False
+ if keyword_rc.search(recipe.pn):
+ match = True
+ else:
+ for prov in recipe.provides:
+ if keyword_rc.search(prov):
+ match = True
+ break
+ if not match:
+ for rprov in recipe.rprovides:
+ if keyword_rc.search(rprov):
+ match = True
+ break
+            if match and recipe.pn not in matches:
+ print_match(recipe.pn)
+ finally:
+ tinfoil.shutdown()
+
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ parser_search = subparsers.add_parser('search', help='Search available recipes',
+ description='Searches for available recipes. Matches on recipe name, package name, description and installed files, and prints the recipe name and summary on match.',
+ group='info')
+ parser_search.add_argument('keyword', help='Keyword to search for (regular expression syntax allowed, use quotes to avoid shell expansion)')
+ parser_search.set_defaults(func=search, no_workspace=True, fixed_setup=context.fixed_setup)
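The keyword is compiled as a regular expression (keyword_rc above), so anchored
patterns work too:

    devtool search '^libx'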
diff --git a/poky/scripts/lib/devtool/standard.py b/poky/scripts/lib/devtool/standard.py
new file mode 100644
index 000000000..a1e8e1d32
--- /dev/null
+++ b/poky/scripts/lib/devtool/standard.py
@@ -0,0 +1,2164 @@
+# Development tool - standard commands plugin
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Devtool standard plugins"""
+
+import os
+import sys
+import re
+import shutil
+import subprocess
+import tempfile
+import logging
+import argparse
+import argparse_oe
+import scriptutils
+import errno
+import glob
+import filecmp
+from collections import OrderedDict
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, use_external_build, setup_git_repo, recipe_to_append, get_bbclassextend_targets, update_unlockedsigs, check_prerelease_version, check_git_repo_dirty, check_git_repo_op, DevtoolError
+from devtool import parse_recipe
+
+logger = logging.getLogger('devtool')
+
+override_branch_prefix = 'devtool-override-'
+
+
+def add(args, config, basepath, workspace):
+ """Entry point for the devtool 'add' subcommand"""
+ import bb
+ import oe.recipeutils
+
+ if not args.recipename and not args.srctree and not args.fetch and not args.fetchuri:
+ raise argparse_oe.ArgumentUsageError('At least one of recipename, srctree, fetchuri or -f/--fetch must be specified', 'add')
+
+ # These are positional arguments, but because we're nice, allow
+ # specifying e.g. source tree without name, or fetch URI without name or
+ # source tree (if we can detect that that is what the user meant)
+ if scriptutils.is_src_url(args.recipename):
+ if not args.fetchuri:
+ if args.fetch:
+ raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
+ args.fetchuri = args.recipename
+ args.recipename = ''
+ elif scriptutils.is_src_url(args.srctree):
+ if not args.fetchuri:
+ if args.fetch:
+ raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
+ args.fetchuri = args.srctree
+ args.srctree = ''
+ elif args.recipename and not args.srctree:
+ if os.sep in args.recipename:
+ args.srctree = args.recipename
+ args.recipename = None
+ elif os.path.isdir(args.recipename):
+ logger.warn('Ambiguous argument "%s" - assuming you mean it to be the recipe name' % args.recipename)
+
+ if not args.fetchuri:
+ if args.srcrev:
+ raise DevtoolError('The -S/--srcrev option is only valid when fetching from an SCM repository')
+ if args.srcbranch:
+ raise DevtoolError('The -B/--srcbranch option is only valid when fetching from an SCM repository')
+
+ if args.srctree and os.path.isfile(args.srctree):
+ args.fetchuri = 'file://' + os.path.abspath(args.srctree)
+ args.srctree = ''
+
+ if args.fetch:
+ if args.fetchuri:
+ raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
+ else:
+ logger.warn('-f/--fetch option is deprecated - you can now simply specify the URL to fetch as a positional argument instead')
+ args.fetchuri = args.fetch
+
+ if args.recipename:
+ if args.recipename in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" %
+ args.recipename)
+ reason = oe.recipeutils.validate_pn(args.recipename)
+ if reason:
+ raise DevtoolError(reason)
+
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ srctreeparent = None
+ tmpsrcdir = None
+ else:
+ srctree = None
+ srctreeparent = get_default_srctree(config)
+ bb.utils.mkdirhier(srctreeparent)
+ tmpsrcdir = tempfile.mkdtemp(prefix='devtoolsrc', dir=srctreeparent)
+
+ if srctree and os.path.exists(srctree):
+ if args.fetchuri:
+ if not os.path.isdir(srctree):
+ raise DevtoolError("Cannot fetch into source tree path %s as "
+ "it exists and is not a directory" %
+ srctree)
+ elif os.listdir(srctree):
+ raise DevtoolError("Cannot fetch into source tree path %s as "
+ "it already exists and is non-empty" %
+ srctree)
+ elif not args.fetchuri:
+ if args.srctree:
+ raise DevtoolError("Specified source tree %s could not be found" %
+ args.srctree)
+ elif srctree:
+ raise DevtoolError("No source tree exists at default path %s - "
+ "either create and populate this directory, "
+ "or specify a path to a source tree, or a "
+ "URI to fetch source from" % srctree)
+ else:
+ raise DevtoolError("You must either specify a source tree "
+ "or a URI to fetch source from")
+
+ if args.version:
+ if '_' in args.version or ' ' in args.version:
+ raise DevtoolError('Invalid version string "%s"' % args.version)
+
+ if args.color == 'auto' and sys.stdout.isatty():
+ color = 'always'
+ else:
+ color = args.color
+ extracmdopts = ''
+ if args.fetchuri:
+ source = args.fetchuri
+ if srctree:
+ extracmdopts += ' -x %s' % srctree
+ else:
+ extracmdopts += ' -x %s' % tmpsrcdir
+ else:
+ source = srctree
+ if args.recipename:
+ extracmdopts += ' -N %s' % args.recipename
+ if args.version:
+ extracmdopts += ' -V %s' % args.version
+ if args.binary:
+ extracmdopts += ' -b'
+ if args.also_native:
+ extracmdopts += ' --also-native'
+ if args.src_subdir:
+ extracmdopts += ' --src-subdir "%s"' % args.src_subdir
+ if args.autorev:
+ extracmdopts += ' -a'
+ if args.fetch_dev:
+ extracmdopts += ' --fetch-dev'
+ if args.mirrors:
+ extracmdopts += ' --mirrors'
+ if args.srcrev:
+ extracmdopts += ' --srcrev %s' % args.srcrev
+ if args.srcbranch:
+ extracmdopts += ' --srcbranch %s' % args.srcbranch
+ if args.provides:
+ extracmdopts += ' --provides %s' % args.provides
+
+ tempdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ try:
+ stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create --devtool -o %s \'%s\' %s' % (color, tempdir, source, extracmdopts), watch=True)
+ except bb.process.ExecutionError as e:
+ if e.exitcode == 15:
+ raise DevtoolError('Could not auto-determine recipe name, please specify it on the command line')
+ else:
+ raise DevtoolError('Command \'%s\' failed' % e.command)
+
+ recipes = glob.glob(os.path.join(tempdir, '*.bb'))
+ if recipes:
+ recipename = os.path.splitext(os.path.basename(recipes[0]))[0].split('_')[0]
+ if recipename in workspace:
+ raise DevtoolError('A recipe with the same name as the one being created (%s) already exists in your workspace' % recipename)
+ recipedir = os.path.join(config.workspace_path, 'recipes', recipename)
+ bb.utils.mkdirhier(recipedir)
+ recipefile = os.path.join(recipedir, os.path.basename(recipes[0]))
+ appendfile = recipe_to_append(recipefile, config)
+ if os.path.exists(appendfile):
+ # This shouldn't be possible, but just in case
+ raise DevtoolError('A recipe with the same name as the one being created already exists in your workspace')
+ if os.path.exists(recipefile):
+ raise DevtoolError('A recipe file %s already exists in your workspace; this shouldn\'t be there - please delete it before continuing' % recipefile)
+ if tmpsrcdir:
+ srctree = os.path.join(srctreeparent, recipename)
+ if os.path.exists(tmpsrcdir):
+ if os.path.exists(srctree):
+ if os.path.isdir(srctree):
+ try:
+ os.rmdir(srctree)
+ except OSError as e:
+ if e.errno == errno.ENOTEMPTY:
+ raise DevtoolError('Source tree path %s already exists and is not empty' % srctree)
+ else:
+ raise
+ else:
+ raise DevtoolError('Source tree path %s already exists and is not a directory' % srctree)
+ logger.info('Using default source tree path %s' % srctree)
+ shutil.move(tmpsrcdir, srctree)
+ else:
+ raise DevtoolError('Couldn\'t find source tree created by recipetool')
+ bb.utils.mkdirhier(recipedir)
+ shutil.move(recipes[0], recipefile)
+ # Move any additional files created by recipetool
+ for fn in os.listdir(tempdir):
+ shutil.move(os.path.join(tempdir, fn), recipedir)
+ else:
+            raise DevtoolError('Failed to create a recipe file for source %s' % source)
+ attic_recipe = os.path.join(config.workspace_path, 'attic', recipename, os.path.basename(recipefile))
+ if os.path.exists(attic_recipe):
+ logger.warn('A modified recipe from a previous invocation exists in %s - you may wish to move this over the top of the new recipe if you had changes in it that you want to continue with' % attic_recipe)
+ finally:
+ if tmpsrcdir and os.path.exists(tmpsrcdir):
+ shutil.rmtree(tmpsrcdir)
+ shutil.rmtree(tempdir)
+
+ for fn in os.listdir(recipedir):
+ _add_md5(config, recipename, os.path.join(recipedir, fn))
+
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ try:
+ rd = tinfoil.parse_recipe_file(recipefile, False)
+ except Exception as e:
+ logger.error(str(e))
+ rd = None
+ if not rd:
+ # Parsing failed. We just created this recipe and we shouldn't
+ # leave it in the workdir or it'll prevent bitbake from starting
+ movefn = '%s.parsefailed' % recipefile
+ logger.error('Parsing newly created recipe failed, moving recipe to %s for reference. If this looks to be caused by the recipe itself, please report this error.' % movefn)
+ shutil.move(recipefile, movefn)
+ return 1
+
+ if args.fetchuri and not args.no_git:
+ setup_git_repo(srctree, args.version, 'devtool', d=tinfoil.config_data)
+
+ initial_rev = None
+ if os.path.exists(os.path.join(srctree, '.git')):
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ initial_rev = stdout.rstrip()
+
+ if args.src_subdir:
+ srctree = os.path.join(srctree, args.src_subdir)
+
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as f:
+ f.write('inherit externalsrc\n')
+ f.write('EXTERNALSRC = "%s"\n' % srctree)
+
+ b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD = "%s"\n' % srctree)
+ if initial_rev:
+ f.write('\n# initial_rev: %s\n' % initial_rev)
+
+ if args.binary:
+ f.write('do_install_append() {\n')
+ f.write(' rm -rf ${D}/.git\n')
+ f.write(' rm -f ${D}/singletask.lock\n')
+ f.write('}\n')
+
+ if bb.data.inherits_class('npm', rd):
+ f.write('do_install_append() {\n')
+ f.write(' # Remove files added to source dir by devtool/externalsrc\n')
+ f.write(' rm -f ${NPM_INSTALLDIR}/singletask.lock\n')
+ f.write(' rm -rf ${NPM_INSTALLDIR}/.git\n')
+ f.write(' rm -rf ${NPM_INSTALLDIR}/oe-local-files\n')
+ f.write(' for symlink in ${EXTERNALSRC_SYMLINKS} ; do\n')
+ f.write(' rm -f ${NPM_INSTALLDIR}/${symlink%%:*}\n')
+ f.write(' done\n')
+ f.write('}\n')
+
+ # Check if the new layer provides recipes whose priorities have been
+        # overridden by PREFERRED_PROVIDER.
+ recipe_name = rd.getVar('PN')
+ provides = rd.getVar('PROVIDES')
+ # Search every item defined in PROVIDES
+ for recipe_provided in provides.split():
+ preferred_provider = 'PREFERRED_PROVIDER_' + recipe_provided
+ current_pprovider = rd.getVar(preferred_provider)
+ if current_pprovider and current_pprovider != recipe_name:
+ if args.fixed_setup:
+                    # If we are inside the eSDK, add the new PREFERRED_PROVIDER to the workspace layer.conf
+ layerconf_file = os.path.join(config.workspace_path, "conf", "layer.conf")
+ with open(layerconf_file, 'a') as f:
+ f.write('%s = "%s"\n' % (preferred_provider, recipe_name))
+ else:
+ logger.warn('Set \'%s\' in order to use the recipe' % preferred_provider)
+ break
+
+ _add_md5(config, recipename, appendfile)
+
+ check_prerelease_version(rd.getVar('PV'), 'devtool add')
+
+ logger.info('Recipe %s has been automatically created; further editing may be required to make it fully functional' % recipefile)
+
+ finally:
+ tinfoil.shutdown()
+
+ return 0
+
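+# For illustration only: the workspace bbappend written by add() above might
+# look roughly like this (the paths and revision are hypothetical):
+#
+#   inherit externalsrc
+#   EXTERNALSRC = "/home/user/workspace/sources/mypkg"
+#   EXTERNALSRC_BUILD = "/home/user/workspace/sources/mypkg"
+#
+#   # initial_rev: 6d5f4e3c2b1a...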
+
+def _check_compatible_recipe(pn, d):
+ """Check if the recipe is supported by devtool"""
+ if pn == 'perf':
+ raise DevtoolError("The perf recipe does not actually check out "
+ "source and thus cannot be supported by this tool",
+ 4)
+
+ if pn in ['kernel-devsrc', 'package-index'] or pn.startswith('gcc-source'):
+ raise DevtoolError("The %s recipe is not supported by this tool" % pn, 4)
+
+ if bb.data.inherits_class('image', d):
+ raise DevtoolError("The %s recipe is an image, and therefore is not "
+ "supported by this tool" % pn, 4)
+
+ if bb.data.inherits_class('populate_sdk', d):
+ raise DevtoolError("The %s recipe is an SDK, and therefore is not "
+ "supported by this tool" % pn, 4)
+
+ if bb.data.inherits_class('packagegroup', d):
+ raise DevtoolError("The %s recipe is a packagegroup, and therefore is "
+ "not supported by this tool" % pn, 4)
+
+ if bb.data.inherits_class('meta', d):
+ raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
+ "not supported by this tool" % pn, 4)
+
+ if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC'):
+ # Not an incompatibility error per se, so we don't pass the error code
+ raise DevtoolError("externalsrc is currently enabled for the %s "
+ "recipe. This prevents the normal do_patch task "
+ "from working. You will need to disable this "
+ "first." % pn)
+
+def _dry_run_copy(src, dst, dry_run_outdir, base_outdir):
+ """Common function for copying a file to the dry run output directory"""
+ relpath = os.path.relpath(dst, base_outdir)
+ if relpath.startswith('..'):
+ raise Exception('Incorrect base path %s for path %s' % (base_outdir, dst))
+ dst = os.path.join(dry_run_outdir, relpath)
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ # Don't overwrite existing files, otherwise in the case of an upgrade
+ # the dry-run written out recipe will be overwritten with an unmodified
+ # version
+ if not os.path.exists(dst):
+ shutil.copy(src, dst)
+
+def _move_file(src, dst, dry_run_outdir=None, base_outdir=None):
+    """Move a file. Creates all the directory components of the destination path."""
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+ logger.debug('Moving %s to %s%s' % (src, dst, dry_run_suffix))
+ if dry_run_outdir:
+ # We want to copy here, not move
+ _dry_run_copy(src, dst, dry_run_outdir, base_outdir)
+ else:
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.move(src, dst)
+
+def _copy_file(src, dst, dry_run_outdir=None, base_outdir=None):
+    """Copy a file. Creates all the directory components of the destination path."""
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+ logger.debug('Copying %s to %s%s' % (src, dst, dry_run_suffix))
+ if dry_run_outdir:
+ _dry_run_copy(src, dst, dry_run_outdir, base_outdir)
+ else:
+ dst_d = os.path.dirname(dst)
+ if dst_d:
+ bb.utils.mkdirhier(dst_d)
+ shutil.copy(src, dst)
+
+def _git_ls_tree(repodir, treeish='HEAD', recursive=False):
+ """List contents of a git treeish"""
+ import bb
+ cmd = ['git', 'ls-tree', '-z', treeish]
+ if recursive:
+ cmd.append('-r')
+ out, _ = bb.process.run(cmd, cwd=repodir)
+ ret = {}
+ if out:
+ for line in out.split('\0'):
+ if line:
+ split = line.split(None, 4)
+ ret[split[3]] = split[0:3]
+ return ret
+
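+# For illustration only: one NUL-terminated record of 'git ls-tree -z HEAD'
+# has the form (the object name is hypothetical):
+#   100644 blob 5b1d3c9... <TAB>oe-local-files/fragment.cfg
+# so _git_ls_tree() above returns a dict mapping
+#   'oe-local-files/fragment.cfg' -> ['100644', 'blob', '5b1d3c9...']
+# i.e. path -> [mode, object type, object name].
+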
+def _git_exclude_path(srctree, path):
+    """Return a pathspec (list of paths) that excludes a certain path"""
+ # NOTE: "Filtering out" files/paths in this way is not entirely reliable -
+ # we don't catch files that are deleted, for example. A more reliable way
+ # to implement this would be to use "negative pathspecs" which were
+ # introduced in Git v1.9.0. Revisit this when/if the required Git version
+ # becomes greater than that.
+ path = os.path.normpath(path)
+    recurse = len(path.split(os.path.sep)) > 1
+ git_files = list(_git_ls_tree(srctree, 'HEAD', recurse).keys())
+ if path in git_files:
+ git_files.remove(path)
+ return git_files
+ else:
+ return ['.']
+
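+def _git_exclude_path_negative(path):
+    """Illustrative sketch only (unused): the 'negative pathspec' approach
+    mentioned in the NOTE above. Git v1.9.0 and later accept the ':!<path>'
+    pathspec magic, which excludes the path directly and, unlike the
+    filtering above, also covers deleted files."""
+    return ['.', ':!%s' % os.path.normpath(path)]
+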
+def _ls_tree(directory):
+ """Recursive listing of files in a directory"""
+ ret = []
+ for root, dirs, files in os.walk(directory):
+ ret.extend([os.path.relpath(os.path.join(root, fname), directory) for
+ fname in files])
+ return ret
+
+
+def extract(args, config, basepath, workspace):
+ """Entry point for the devtool 'extract' subcommand"""
+ import bb
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ if not tinfoil:
+ # Error already shown
+ return 1
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ srctree = os.path.abspath(args.srctree)
+ initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
+ logger.info('Source tree extracted to %s' % srctree)
+
+ if initial_rev:
+ return 0
+ else:
+ return 1
+ finally:
+ tinfoil.shutdown()
+
+def sync(args, config, basepath, workspace):
+ """Entry point for the devtool 'sync' subcommand"""
+ import bb
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ if not tinfoil:
+ # Error already shown
+ return 1
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ srctree = os.path.abspath(args.srctree)
+ initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, True, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=True)
+ logger.info('Source tree %s synchronized' % srctree)
+
+ if initial_rev:
+ return 0
+ else:
+ return 1
+ finally:
+ tinfoil.shutdown()
+
+
+def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, workspace, fixed_setup, d, tinfoil, no_overrides=False):
+ """Extract sources of a recipe"""
+ import oe.recipeutils
+ import oe.patch
+
+ pn = d.getVar('PN')
+
+ _check_compatible_recipe(pn, d)
+
+ if sync:
+ if not os.path.exists(srctree):
+ raise DevtoolError("output path %s does not exist" % srctree)
+ else:
+ if os.path.exists(srctree):
+ if not os.path.isdir(srctree):
+ raise DevtoolError("output path %s exists and is not a directory" %
+ srctree)
+ elif os.listdir(srctree):
+ raise DevtoolError("output path %s already exists and is "
+ "non-empty" % srctree)
+
+ if 'noexec' in (d.getVarFlags('do_unpack', False) or []):
+ raise DevtoolError("The %s recipe has do_unpack disabled, unable to "
+ "extract source" % pn, 4)
+
+ if not sync:
+ # Prepare for shutil.move later on
+ bb.utils.mkdirhier(srctree)
+ os.rmdir(srctree)
+
+ extra_overrides = []
+ if not no_overrides:
+ history = d.varhistory.variable('SRC_URI')
+ for event in history:
+ if not 'flag' in event:
+ if event['op'].startswith(('_append[', '_prepend[')):
+ extra_overrides.append(event['op'].split('[')[1].split(']')[0])
+ if extra_overrides:
+ logger.info('SRC_URI contains some conditional appends/prepends - will create branches to represent these')
+
+ initial_rev = None
+
+ appendexisted = False
+ recipefile = d.getVar('FILE')
+ appendfile = recipe_to_append(recipefile, config)
+ is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
+
+ # We need to redirect WORKDIR, STAMPS_DIR etc. under a temporary
+ # directory so that:
+ # (a) we pick up all files that get unpacked to the WORKDIR, and
+ # (b) we don't disturb the existing build
+ # However, with recipe-specific sysroots the sysroots for the recipe
+ # will be prepared under WORKDIR, and if we used the system temporary
+ # directory (i.e. usually /tmp) as used by mkdtemp by default, then
+ # our attempts to hardlink files into the recipe-specific sysroots
+ # will fail on systems where /tmp is a different filesystem, and it
+ # would have to fall back to copying the files which is a waste of
+ # time. Put the temp directory under the WORKDIR to prevent that from
+ # being a problem.
+ tempbasedir = d.getVar('WORKDIR')
+ bb.utils.mkdirhier(tempbasedir)
+ tempdir = tempfile.mkdtemp(prefix='devtooltmp-', dir=tempbasedir)
+ try:
+ tinfoil.logger.setLevel(logging.WARNING)
+
+ # FIXME this results in a cache reload under control of tinfoil, which is fine
+ # except we don't get the knotty progress bar
+
+ if os.path.exists(appendfile):
+ appendbackup = os.path.join(tempdir, os.path.basename(appendfile) + '.bak')
+ shutil.copyfile(appendfile, appendbackup)
+ else:
+ appendbackup = None
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ logger.debug('writing append file %s' % appendfile)
+ with open(appendfile, 'a') as f:
+ f.write('###--- _extract_source\n')
+ f.write('DEVTOOL_TEMPDIR = "%s"\n' % tempdir)
+ f.write('DEVTOOL_DEVBRANCH = "%s"\n' % devbranch)
+ if not is_kernel_yocto:
+ f.write('PATCHTOOL = "git"\n')
+ f.write('PATCH_COMMIT_FUNCTIONS = "1"\n')
+ if extra_overrides:
+ f.write('DEVTOOL_EXTRA_OVERRIDES = "%s"\n' % ':'.join(extra_overrides))
+ f.write('inherit devtool-source\n')
+ f.write('###--- _extract_source\n')
+
+ update_unlockedsigs(basepath, workspace, fixed_setup, [pn])
+
+ sstate_manifests = d.getVar('SSTATE_MANIFESTS')
+ bb.utils.mkdirhier(sstate_manifests)
+ preservestampfile = os.path.join(sstate_manifests, 'preserve-stamps')
+ with open(preservestampfile, 'w') as f:
+ f.write(d.getVar('STAMP'))
+ try:
+ if bb.data.inherits_class('kernel-yocto', d):
+ # We need to generate the kernel config
+ task = 'do_configure'
+ else:
+ task = 'do_patch'
+
+ # Run the fetch + unpack tasks
+ res = tinfoil.build_targets(pn,
+ task,
+ handle_events=True)
+ finally:
+ if os.path.exists(preservestampfile):
+ os.remove(preservestampfile)
+
+ if not res:
+ raise DevtoolError('Extracting source for %s failed' % pn)
+
+ try:
+ with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
+ initial_rev = f.read()
+
+ with open(os.path.join(tempdir, 'srcsubdir'), 'r') as f:
+ srcsubdir = f.read()
+ except FileNotFoundError as e:
+ raise DevtoolError('Something went wrong with source extraction - the devtool-source class was not active or did not function correctly:\n%s' % str(e))
+ srcsubdir_rel = os.path.relpath(srcsubdir, os.path.join(tempdir, 'workdir'))
+
+ tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
+ srctree_localdir = os.path.join(srctree, 'oe-local-files')
+
+ if sync:
+ bb.process.run('git fetch file://' + srcsubdir + ' ' + devbranch + ':' + devbranch, cwd=srctree)
+
+ # Move oe-local-files directory to srctree
+            # As oe-local-files is not part of the constructed git tree,
+            # removing it directly during synchronization might surprise
+            # the user. Instead, we move it to oe-local-files.bak and
+            # remind the user in the log message.
+ if os.path.exists(srctree_localdir + '.bak'):
+                shutil.rmtree(srctree_localdir + '.bak')
+
+ if os.path.exists(srctree_localdir):
+ logger.info('Backing up current local file directory %s' % srctree_localdir)
+ shutil.move(srctree_localdir, srctree_localdir + '.bak')
+
+ if os.path.exists(tempdir_localdir):
+ logger.info('Syncing local source files to srctree...')
+ shutil.copytree(tempdir_localdir, srctree_localdir)
+ else:
+ # Move oe-local-files directory to srctree
+ if os.path.exists(tempdir_localdir):
+ logger.info('Adding local source files to srctree...')
+ shutil.move(tempdir_localdir, srcsubdir)
+
+ shutil.move(srcsubdir, srctree)
+
+ if os.path.abspath(d.getVar('S')) == os.path.abspath(d.getVar('WORKDIR')):
+ # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
+ # (otherwise the recipe won't build as expected)
+ local_files_dir = os.path.join(srctree, 'oe-local-files')
+ addfiles = []
+ for root, _, files in os.walk(local_files_dir):
+ relpth = os.path.relpath(root, local_files_dir)
+ if relpth != '.':
+ bb.utils.mkdirhier(os.path.join(srctree, relpth))
+ for fn in files:
+ if fn == '.gitignore':
+ continue
+ destpth = os.path.join(srctree, relpth, fn)
+ if os.path.exists(destpth):
+ os.unlink(destpth)
+ os.symlink('oe-local-files/%s' % fn, destpth)
+ addfiles.append(os.path.join(relpth, fn))
+ if addfiles:
+ bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
+ bb.process.run('git %s commit -a -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+
+ if is_kernel_yocto:
+ logger.info('Copying kernel config to srctree')
+ shutil.copy2(os.path.join(tempdir, '.config'), srctree)
+
+ finally:
+ if appendbackup:
+ shutil.copyfile(appendbackup, appendfile)
+ elif os.path.exists(appendfile):
+ os.remove(appendfile)
+ if keep_temp:
+ logger.info('Preserving temporary directory %s' % tempdir)
+ else:
+ shutil.rmtree(tempdir)
+ return initial_rev, srcsubdir_rel
+
+def _add_md5(config, recipename, filename):
+ """Record checksum of a file (or recursively for a directory) to the md5-file of the workspace"""
+ import bb.utils
+
+ def addfile(fn):
+ md5 = bb.utils.md5_file(fn)
+ with open(os.path.join(config.workspace_path, '.devtool_md5'), 'a+') as f:
+ md5_str = '%s|%s|%s\n' % (recipename, os.path.relpath(fn, config.workspace_path), md5)
+ f.seek(0, os.SEEK_SET)
+ if not md5_str in f.read():
+ f.write(md5_str)
+
+ if os.path.isdir(filename):
+ for root, _, files in os.walk(filename):
+ for f in files:
+ addfile(os.path.join(root, f))
+ else:
+ addfile(filename)
+
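+# For illustration only: each line recorded in .devtool_md5 has the form
+#   <recipename>|<path relative to workspace>|<md5>
+# e.g. (hypothetical checksum):
+#   mypkg|appends/mypkg.bbappend|d41d8cd98f00b204e9800998ecf8427e
+# _check_preserve() below compares these recorded checksums against the files
+# on disk to detect manual modifications.
+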
+def _check_preserve(config, recipename):
+ """Check if a file was manually changed and needs to be saved in 'attic'
+ directory"""
+ import bb.utils
+ origfile = os.path.join(config.workspace_path, '.devtool_md5')
+ newfile = os.path.join(config.workspace_path, '.devtool_md5_new')
+ preservepath = os.path.join(config.workspace_path, 'attic', recipename)
+ with open(origfile, 'r') as f:
+ with open(newfile, 'w') as tf:
+ for line in f.readlines():
+ splitline = line.rstrip().split('|')
+ if splitline[0] == recipename:
+ removefile = os.path.join(config.workspace_path, splitline[1])
+ try:
+ md5 = bb.utils.md5_file(removefile)
+ except IOError as err:
+                        if err.errno == errno.ENOENT:
+ # File no longer exists, skip it
+ continue
+ else:
+ raise
+ if splitline[2] != md5:
+ bb.utils.mkdirhier(preservepath)
+ preservefile = os.path.basename(removefile)
+ logger.warn('File %s modified since it was written, preserving in %s' % (preservefile, preservepath))
+ shutil.move(removefile, os.path.join(preservepath, preservefile))
+ else:
+ os.remove(removefile)
+ else:
+ tf.write(line)
+ os.rename(newfile, origfile)
+
+def modify(args, config, basepath, workspace):
+ """Entry point for the devtool 'modify' subcommand"""
+ import bb
+ import oe.recipeutils
+ import oe.patch
+
+ if args.recipename in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" %
+ args.recipename)
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ pn = rd.getVar('PN')
+ if pn != args.recipename:
+ logger.info('Mapping %s to %s' % (args.recipename, pn))
+ if pn in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" %
+ pn)
+
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ else:
+ srctree = get_default_srctree(config, pn)
+
+ if args.no_extract and not os.path.isdir(srctree):
+ raise DevtoolError("--no-extract specified and source path %s does "
+ "not exist or is not a directory" %
+ srctree)
+
+ recipefile = rd.getVar('FILE')
+ appendfile = recipe_to_append(recipefile, config, args.wildcard)
+ if os.path.exists(appendfile):
+ raise DevtoolError("Another variant of recipe %s is already in your "
+ "workspace (only one variant of a recipe can "
+ "currently be worked on at once)"
+ % pn)
+
+ _check_compatible_recipe(pn, rd)
+
+ initial_rev = None
+ commits = []
+ check_commits = False
+ if not args.no_extract:
+ initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
+ if not initial_rev:
+ return 1
+ logger.info('Source tree extracted to %s' % srctree)
+ # Get list of commits since this revision
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
+ commits = stdout.split()
+ check_commits = True
+ else:
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Check if it's a tree previously extracted by us
+ try:
+ (stdout, _) = bb.process.run('git branch --contains devtool-base', cwd=srctree)
+ except bb.process.ExecutionError:
+ stdout = ''
+ if stdout:
+ check_commits = True
+ for line in stdout.splitlines():
+ if line.startswith('*'):
+ (stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
+ initial_rev = stdout.rstrip()
+ if not initial_rev:
+ # Otherwise, just grab the head revision
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ initial_rev = stdout.rstrip()
+
+ branch_patches = {}
+ if check_commits:
+ # Check if there are override branches
+ (stdout, _) = bb.process.run('git branch', cwd=srctree)
+ branches = []
+ for line in stdout.rstrip().splitlines():
+ branchname = line[2:].rstrip()
+ if branchname.startswith(override_branch_prefix):
+ branches.append(branchname)
+ if branches:
+ logger.warn('SRC_URI is conditionally overridden in this recipe, thus several %s* branches have been created, one for each override that makes changes to SRC_URI. It is recommended that you make changes to the %s branch first, then checkout and rebase each %s* branch and update any unique patches there (duplicates on those branches will be ignored by devtool finish/update-recipe)' % (override_branch_prefix, args.branch, override_branch_prefix))
+ branches.insert(0, args.branch)
+ seen_patches = []
+ for branch in branches:
+ branch_patches[branch] = []
+ (stdout, _) = bb.process.run('git log devtool-base..%s' % branch, cwd=srctree)
+ for line in stdout.splitlines():
+ line = line.strip()
+ if line.startswith(oe.patch.GitApplyTree.patch_line_prefix):
+ origpatch = line[len(oe.patch.GitApplyTree.patch_line_prefix):].split(':', 1)[-1].strip()
+ if not origpatch in seen_patches:
+ seen_patches.append(origpatch)
+ branch_patches[branch].append(origpatch)
+
+ # Need to grab this here in case the source is within a subdirectory
+ srctreebase = srctree
+
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(rd.getVar('S'))
+ workdir = os.path.abspath(rd.getVar('WORKDIR'))
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree = os.path.join(srctree, srcsubdir)
+
+ bb.utils.mkdirhier(os.path.dirname(appendfile))
+ with open(appendfile, 'w') as f:
+ f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n')
+ # Local files can be modified/tracked in separate subdir under srctree
+ # Mostly useful for packages with S != WORKDIR
+ f.write('FILESPATH_prepend := "%s:"\n' %
+ os.path.join(srctreebase, 'oe-local-files'))
+ f.write('# srctreebase: %s\n' % srctreebase)
+
+ f.write('\ninherit externalsrc\n')
+ f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
+ f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+
+ b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+
+ if bb.data.inherits_class('kernel', rd):
+ f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
+ 'do_fetch do_unpack do_kernel_configme do_kernel_configcheck"\n')
+ f.write('\ndo_patch() {\n'
+ ' :\n'
+ '}\n')
+ f.write('\ndo_configure_append() {\n'
+ ' cp ${B}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ '}\n')
+ if initial_rev:
+ f.write('\n# initial_rev: %s\n' % initial_rev)
+ for commit in commits:
+ f.write('# commit: %s\n' % commit)
+ if branch_patches:
+ for branch in branch_patches:
+ if branch == args.branch:
+ continue
+ f.write('# patches_%s: %s\n' % (branch, ','.join(branch_patches[branch])))
+
+ update_unlockedsigs(basepath, workspace, args.fixed_setup, [pn])
+
+ _add_md5(config, pn, appendfile)
+
+ logger.info('Recipe %s now set up to build from %s' % (pn, srctree))
+
+ finally:
+ tinfoil.shutdown()
+
+ return 0
+
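+# For illustration only: the workspace bbappend written by modify() above
+# might look roughly like this (the recipe name, paths and revision are
+# hypothetical):
+#
+#   FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+#   FILESPATH_prepend := "/home/user/workspace/sources/mypkg/oe-local-files:"
+#   # srctreebase: /home/user/workspace/sources/mypkg
+#
+#   inherit externalsrc
+#   EXTERNALSRC_pn-mypkg = "/home/user/workspace/sources/mypkg"
+#   EXTERNALSRC_BUILD_pn-mypkg = "/home/user/workspace/sources/mypkg"
+#
+#   # initial_rev: 8f0b077a34...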
+
+def rename(args, config, basepath, workspace):
+ """Entry point for the devtool 'rename' subcommand"""
+ import bb
+ import oe.recipeutils
+
+ check_workspace_recipe(workspace, args.recipename)
+
+ if not (args.newname or args.version):
+ raise DevtoolError('You must specify a new name, a version with -V/--version, or both')
+
+ recipefile = workspace[args.recipename]['recipefile']
+ if not recipefile:
+ raise DevtoolError('devtool rename can only be used where the recipe file itself is in the workspace (e.g. after devtool add)')
+
+ if args.newname and args.newname != args.recipename:
+ reason = oe.recipeutils.validate_pn(args.newname)
+ if reason:
+ raise DevtoolError(reason)
+ newname = args.newname
+ else:
+ newname = args.recipename
+
+ append = workspace[args.recipename]['bbappend']
+ appendfn = os.path.splitext(os.path.basename(append))[0]
+ splitfn = appendfn.split('_')
+ if len(splitfn) > 1:
+        origfnver = splitfn[1]
+ else:
+ origfnver = ''
+
+ recipefilemd5 = None
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ bp = rd.getVar('BP')
+ bpn = rd.getVar('BPN')
+ if newname != args.recipename:
+ localdata = rd.createCopy()
+ localdata.setVar('PN', newname)
+ newbpn = localdata.getVar('BPN')
+ else:
+ newbpn = bpn
+ s = rd.getVar('S', False)
+ src_uri = rd.getVar('SRC_URI', False)
+ pv = rd.getVar('PV')
+
+ # Correct variable values that refer to the upstream source - these
+ # values must stay the same, so if the name/version are changing then
+ # we need to fix them up
+ new_s = s
+ new_src_uri = src_uri
+ if newbpn != bpn:
+ # ${PN} here is technically almost always incorrect, but people do use it
+ new_s = new_s.replace('${BPN}', bpn)
+ new_s = new_s.replace('${PN}', bpn)
+ new_s = new_s.replace('${BP}', '%s-${PV}' % bpn)
+ new_src_uri = new_src_uri.replace('${BPN}', bpn)
+ new_src_uri = new_src_uri.replace('${PN}', bpn)
+ new_src_uri = new_src_uri.replace('${BP}', '%s-${PV}' % bpn)
+ if args.version and origfnver == pv:
+ new_s = new_s.replace('${PV}', pv)
+ new_s = new_s.replace('${BP}', '${BPN}-%s' % pv)
+ new_src_uri = new_src_uri.replace('${PV}', pv)
+ new_src_uri = new_src_uri.replace('${BP}', '${BPN}-%s' % pv)
+ patchfields = {}
+ if new_s != s:
+ patchfields['S'] = new_s
+ if new_src_uri != src_uri:
+ patchfields['SRC_URI'] = new_src_uri
+ if patchfields:
+ recipefilemd5 = bb.utils.md5_file(recipefile)
+ oe.recipeutils.patch_recipe(rd, recipefile, patchfields)
+ newrecipefilemd5 = bb.utils.md5_file(recipefile)
+ finally:
+ tinfoil.shutdown()
+
+ if args.version:
+ newver = args.version
+ else:
+ newver = origfnver
+
+ if newver:
+ newappend = '%s_%s.bbappend' % (newname, newver)
+ newfile = '%s_%s.bb' % (newname, newver)
+ else:
+ newappend = '%s.bbappend' % newname
+ newfile = '%s.bb' % newname
+
+ oldrecipedir = os.path.dirname(recipefile)
+ newrecipedir = os.path.join(config.workspace_path, 'recipes', newname)
+ if oldrecipedir != newrecipedir:
+ bb.utils.mkdirhier(newrecipedir)
+
+ newappend = os.path.join(os.path.dirname(append), newappend)
+ newfile = os.path.join(newrecipedir, newfile)
+
+ # Rename bbappend
+ logger.info('Renaming %s to %s' % (append, newappend))
+ os.rename(append, newappend)
+ # Rename recipe file
+ logger.info('Renaming %s to %s' % (recipefile, newfile))
+ os.rename(recipefile, newfile)
+
+ # Rename source tree if it's the default path
+ appendmd5 = None
+ if not args.no_srctree:
+ srctree = workspace[args.recipename]['srctree']
+ if os.path.abspath(srctree) == os.path.join(config.workspace_path, 'sources', args.recipename):
+ newsrctree = os.path.join(config.workspace_path, 'sources', newname)
+ logger.info('Renaming %s to %s' % (srctree, newsrctree))
+ shutil.move(srctree, newsrctree)
+ # Correct any references (basically EXTERNALSRC*) in the .bbappend
+ appendmd5 = bb.utils.md5_file(newappend)
+ appendlines = []
+ with open(newappend, 'r') as f:
+ for line in f:
+ appendlines.append(line)
+ with open(newappend, 'w') as f:
+ for line in appendlines:
+ if srctree in line:
+ line = line.replace(srctree, newsrctree)
+ f.write(line)
+ newappendmd5 = bb.utils.md5_file(newappend)
+
+ bpndir = None
+ newbpndir = None
+ if newbpn != bpn:
+ bpndir = os.path.join(oldrecipedir, bpn)
+ if os.path.exists(bpndir):
+ newbpndir = os.path.join(newrecipedir, newbpn)
+ logger.info('Renaming %s to %s' % (bpndir, newbpndir))
+ shutil.move(bpndir, newbpndir)
+
+ bpdir = None
+ newbpdir = None
+ if newver != origfnver or newbpn != bpn:
+ bpdir = os.path.join(oldrecipedir, bp)
+ if os.path.exists(bpdir):
+ newbpdir = os.path.join(newrecipedir, '%s-%s' % (newbpn, newver))
+ logger.info('Renaming %s to %s' % (bpdir, newbpdir))
+ shutil.move(bpdir, newbpdir)
+
+ if oldrecipedir != newrecipedir:
+ # Move any stray files and delete the old recipe directory
+ for entry in os.listdir(oldrecipedir):
+ oldpath = os.path.join(oldrecipedir, entry)
+ newpath = os.path.join(newrecipedir, entry)
+ logger.info('Renaming %s to %s' % (oldpath, newpath))
+ shutil.move(oldpath, newpath)
+ os.rmdir(oldrecipedir)
+
+ # Now take care of entries in .devtool_md5
+ md5entries = []
+ with open(os.path.join(config.workspace_path, '.devtool_md5'), 'r') as f:
+ for line in f:
+ md5entries.append(line)
+
+ if bpndir and newbpndir:
+ relbpndir = os.path.relpath(bpndir, config.workspace_path) + '/'
+ else:
+ relbpndir = None
+ if bpdir and newbpdir:
+ relbpdir = os.path.relpath(bpdir, config.workspace_path) + '/'
+ else:
+ relbpdir = None
+
+ with open(os.path.join(config.workspace_path, '.devtool_md5'), 'w') as f:
+ for entry in md5entries:
+ splitentry = entry.rstrip().split('|')
+ if len(splitentry) > 2:
+ if splitentry[0] == args.recipename:
+ splitentry[0] = newname
+ if splitentry[1] == os.path.relpath(append, config.workspace_path):
+ splitentry[1] = os.path.relpath(newappend, config.workspace_path)
+ if appendmd5 and splitentry[2] == appendmd5:
+ splitentry[2] = newappendmd5
+ elif splitentry[1] == os.path.relpath(recipefile, config.workspace_path):
+ splitentry[1] = os.path.relpath(newfile, config.workspace_path)
+ if recipefilemd5 and splitentry[2] == recipefilemd5:
+ splitentry[2] = newrecipefilemd5
+ elif relbpndir and splitentry[1].startswith(relbpndir):
+ splitentry[1] = os.path.relpath(os.path.join(newbpndir, splitentry[1][len(relbpndir):]), config.workspace_path)
+ elif relbpdir and splitentry[1].startswith(relbpdir):
+ splitentry[1] = os.path.relpath(os.path.join(newbpdir, splitentry[1][len(relbpdir):]), config.workspace_path)
+ entry = '|'.join(splitentry) + '\n'
+ f.write(entry)
+ return 0
+
+
+def _get_patchset_revs(srctree, recipe_path, initial_rev=None, force_patch_refresh=False):
+    """Get the initial and update revs of a recipe. These are the start point
+    of the whole patchset and the start point for the patches to be
+    re-generated/updated, respectively.
+    """
+ import bb
+
+ # Get current branch
+ stdout, _ = bb.process.run('git rev-parse --abbrev-ref HEAD',
+ cwd=srctree)
+ branchname = stdout.rstrip()
+
+ # Parse initial rev from recipe if not specified
+ commits = []
+ patches = []
+ with open(recipe_path, 'r') as f:
+ for line in f:
+ if line.startswith('# initial_rev:'):
+ if not initial_rev:
+ initial_rev = line.split(':')[-1].strip()
+ elif line.startswith('# commit:') and not force_patch_refresh:
+ commits.append(line.split(':')[-1].strip())
+ elif line.startswith('# patches_%s:' % branchname):
+ patches = line.split(':')[-1].strip().split(',')
+
+ update_rev = initial_rev
+ changed_revs = None
+ if initial_rev:
+ # Find first actually changed revision
+ stdout, _ = bb.process.run('git rev-list --reverse %s..HEAD' %
+ initial_rev, cwd=srctree)
+ newcommits = stdout.split()
+ for i in range(min(len(commits), len(newcommits))):
+ if newcommits[i] == commits[i]:
+ update_rev = commits[i]
+
+ try:
+ stdout, _ = bb.process.run('git cherry devtool-patched',
+ cwd=srctree)
+ except bb.process.ExecutionError as err:
+ stdout = None
+
+ if stdout is not None and not force_patch_refresh:
+ changed_revs = []
+ for line in stdout.splitlines():
+ if line.startswith('+ '):
+ rev = line.split()[1]
+ if rev in newcommits:
+ changed_revs.append(rev)
+
+ return initial_rev, update_rev, changed_revs, patches
+
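+# For illustration only: _get_patchset_revs() above parses marker comments
+# that devtool wrote into the workspace bbappend, e.g. (hypothetical hashes
+# and override branch name):
+#   # initial_rev: 8f0b077a34...
+#   # commit: 1a2b3c4d5e...
+#   # patches_devtool-override-libc-musl: 0001-fix-build.patch,0002-tweak.patch
+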
+def _remove_file_entries(srcuri, filelist):
+ """Remove file:// entries from SRC_URI"""
+ remaining = filelist[:]
+ entries = []
+ for fname in filelist:
+ basename = os.path.basename(fname)
+ for i in range(len(srcuri)):
+ if (srcuri[i].startswith('file://') and
+ os.path.basename(srcuri[i].split(';')[0]) == basename):
+ entries.append(srcuri[i])
+ remaining.remove(fname)
+ srcuri.pop(i)
+ break
+ return entries, remaining
+
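+# For illustration only (hypothetical values): given
+#   srcuri = ['git://example.com/repo.git;branch=master',
+#             'file://fix-build.patch;striplevel=2']
+#   filelist = ['/some/path/fix-build.patch', '/some/path/gone.patch']
+# _remove_file_entries(srcuri, filelist) returns
+#   (['file://fix-build.patch;striplevel=2'], ['/some/path/gone.patch'])
+# and srcuri is left holding only the git:// entry.
+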
+def _replace_srcuri_entry(srcuri, filename, newentry):
+ """Replace entry corresponding to specified file with a new entry"""
+ basename = os.path.basename(filename)
+ for i in range(len(srcuri)):
+ if os.path.basename(srcuri[i].split(';')[0]) == basename:
+ srcuri.pop(i)
+ srcuri.insert(i, newentry)
+ break
+
+def _remove_source_files(append, files, destpath, no_report_remove=False, dry_run=False):
+ """Unlink existing patch files"""
+
+ dry_run_suffix = ' (dry-run)' if dry_run else ''
+
+ for path in files:
+ if append:
+ if not destpath:
+ raise Exception('destpath should be set here')
+ path = os.path.join(destpath, os.path.basename(path))
+
+ if os.path.exists(path):
+ if not no_report_remove:
+ logger.info('Removing file %s%s' % (path, dry_run_suffix))
+ if not dry_run:
+ # FIXME "git rm" here would be nice if the file in question is
+ # tracked
+ # FIXME there's a chance that this file is referred to by
+ # another recipe, in which case deleting wouldn't be the
+ # right thing to do
+ os.remove(path)
+ # Remove directory if empty
+ try:
+ os.rmdir(os.path.dirname(path))
+ except OSError as ose:
+ if ose.errno != errno.ENOTEMPTY:
+ raise
+
+
+def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
+ """Export patches from srctree to given location.
+ Returns three-tuple of dicts:
+    1. updated - patches that already exist in SRC_URI
+    2. added - new patches that don't exist in SRC_URI
+    3. removed - patches that exist in SRC_URI but not in exported patches
+ In each dict the key is the 'basepath' of the URI and value is the
+ absolute path to the existing file in recipe space (if any).
+ """
+ import oe.recipeutils
+ from oe.patch import GitApplyTree
+ updated = OrderedDict()
+ added = OrderedDict()
+ seqpatch_re = re.compile('^([0-9]{4}-)?(.+)')
+
+ existing_patches = dict((os.path.basename(path), path) for path in
+ oe.recipeutils.get_recipe_patches(rd))
+ logger.debug('Existing patches: %s' % existing_patches)
+
+ # Generate patches from Git, exclude local files directory
+ patch_pathspec = _git_exclude_path(srctree, 'oe-local-files')
+ GitApplyTree.extractPatches(srctree, start_rev, destdir, patch_pathspec)
+
+ new_patches = sorted(os.listdir(destdir))
+ for new_patch in new_patches:
+        # Strip numbering from patch names. If it's a git sequence-named patch,
+        # the numbers might not match up since we are starting from a different
+        # revision. This does assume that people are using unique shortlog
+        # values, but they ought to be anyway...
+ new_basename = seqpatch_re.match(new_patch).group(2)
+ match_name = None
+ for old_patch in existing_patches:
+ old_basename = seqpatch_re.match(old_patch).group(2)
+ old_basename_splitext = os.path.splitext(old_basename)
+ if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
+ old_patch_noext = os.path.splitext(old_patch)[0]
+ match_name = old_patch_noext
+ break
+ elif new_basename == old_basename:
+ match_name = old_patch
+ break
+ if match_name:
+ # Rename patch files
+ if new_patch != match_name:
+ os.rename(os.path.join(destdir, new_patch),
+ os.path.join(destdir, match_name))
+ # Need to pop it off the list now before checking changed_revs
+ oldpath = existing_patches.pop(old_patch)
+ if changed_revs is not None:
+ # Avoid updating patches that have not actually changed
+ with open(os.path.join(destdir, match_name), 'r') as f:
+ firstlineitems = f.readline().split()
+ # Looking for "From <hash>" line
+ if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
+ if not firstlineitems[1] in changed_revs:
+ continue
+ # Recompress if necessary
+ if oldpath.endswith(('.gz', '.Z')):
+ bb.process.run(['gzip', match_name], cwd=destdir)
+ if oldpath.endswith('.gz'):
+ match_name += '.gz'
+ else:
+ match_name += '.Z'
+ elif oldpath.endswith('.bz2'):
+ bb.process.run(['bzip2', match_name], cwd=destdir)
+ match_name += '.bz2'
+ updated[match_name] = oldpath
+ else:
+ added[new_patch] = None
+ return (updated, added, existing_patches)
+
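+# For illustration only: the seqpatch_re used above strips an optional
+# 'NNNN-' sequence prefix, so '0001-fix-foo.patch' and 'fix-foo.patch' both
+# yield 'fix-foo.patch' via .match(...).group(2). This lets a regenerated
+# patch be matched against the existing one even if its position in the
+# series has changed.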
+
+def _create_kconfig_diff(srctree, rd, outfile):
+ """Create a kconfig fragment"""
+ # Only update config fragment if both config files exist
+ orig_config = os.path.join(srctree, '.config.baseline')
+ new_config = os.path.join(srctree, '.config.new')
+ if os.path.exists(orig_config) and os.path.exists(new_config):
+ cmd = ['diff', '--new-line-format=%L', '--old-line-format=',
+ '--unchanged-line-format=', orig_config, new_config]
+ pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = pipe.communicate()
+ if pipe.returncode == 1:
+ logger.info("Updating config fragment %s" % outfile)
+ with open(outfile, 'wb') as fobj:
+ fobj.write(stdout)
+ elif pipe.returncode == 0:
+ logger.info("Would remove config fragment %s" % outfile)
+ if os.path.exists(outfile):
+ # Remove fragment file in case of empty diff
+ logger.info("Removing config fragment %s" % outfile)
+ os.unlink(outfile)
+ else:
+ raise bb.process.ExecutionError(cmd, pipe.returncode, stdout, stderr)
+ return True
+ return False
+
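+# For illustration only: the diff invocation above ('--new-line-format=%L'
+# with empty old/unchanged line formats) prints only lines added or changed
+# in .config.new, which is exactly a kconfig fragment. E.g. (hypothetical):
+#   .config.baseline contains CONFIG_A=y
+#   .config.new contains CONFIG_A=y and CONFIG_B=m
+# then the fragment contains just CONFIG_B=m. diff exits 1 when the files
+# differ and 0 when they are identical, hence the returncode handling.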
+
+def _export_local_files(srctree, rd, destdir, srctreebase):
+ """Copy local files from srctree to given location.
+ Returns three-tuple of dicts:
+    1. updated - files that already exist in SRC_URI
+    2. added - new files that don't exist in SRC_URI
+    3. removed - files that exist in SRC_URI but not in exported files
+ In each dict the key is the 'basepath' of the URI and value is the
+ absolute path to the existing file in recipe space (if any).
+ """
+ import oe.recipeutils
+
+ # Find out local files (SRC_URI files that exist in the "recipe space").
+ # Local files that reside in srctree are not included in patch generation.
+ # Instead they are directly copied over the original source files (in
+ # recipe space).
+ existing_files = oe.recipeutils.get_recipe_local_files(rd)
+ new_set = None
+ updated = OrderedDict()
+ added = OrderedDict()
+ removed = OrderedDict()
+ local_files_dir = os.path.join(srctreebase, 'oe-local-files')
+ git_files = _git_ls_tree(srctree)
+ if 'oe-local-files' in git_files:
+ # If tracked by Git, take the files from srctree HEAD. First get
+ # the tree object of the directory
+ tmp_index = os.path.join(srctree, '.git', 'index.tmp.devtool')
+ tree = git_files['oe-local-files'][2]
+ bb.process.run(['git', 'checkout', tree, '--', '.'], cwd=srctree,
+ env=dict(os.environ, GIT_WORK_TREE=destdir,
+ GIT_INDEX_FILE=tmp_index))
+ new_set = list(_git_ls_tree(srctree, tree, True).keys())
+ elif os.path.isdir(local_files_dir):
+ # If not tracked by Git, just copy from working copy
+ new_set = _ls_tree(local_files_dir)
+ bb.process.run(['cp', '-ax',
+ os.path.join(local_files_dir, '.'), destdir])
+ else:
+ new_set = []
+
+ # Special handling for kernel config
+ if bb.data.inherits_class('kernel-yocto', rd):
+ fragment_fn = 'devtool-fragment.cfg'
+ fragment_path = os.path.join(destdir, fragment_fn)
+ if _create_kconfig_diff(srctree, rd, fragment_path):
+ if os.path.exists(fragment_path):
+ if fragment_fn not in new_set:
+ new_set.append(fragment_fn)
+ # Copy fragment to local-files
+ if os.path.isdir(local_files_dir):
+ shutil.copy2(fragment_path, local_files_dir)
+ else:
+ if fragment_fn in new_set:
+ new_set.remove(fragment_fn)
+ # Remove fragment from local-files
+ if os.path.exists(os.path.join(local_files_dir, fragment_fn)):
+ os.unlink(os.path.join(local_files_dir, fragment_fn))
+
+ if new_set is not None:
+ for fname in new_set:
+ if fname in existing_files:
+ origpath = existing_files.pop(fname)
+ workpath = os.path.join(local_files_dir, fname)
+ if not filecmp.cmp(origpath, workpath):
+ updated[fname] = origpath
+ elif fname != '.gitignore':
+ added[fname] = None
+
+ workdir = rd.getVar('WORKDIR')
+ s = rd.getVar('S')
+ if not s.endswith(os.sep):
+ s += os.sep
+
+ if workdir != s:
+ # Handle files where subdir= was specified
+ for fname in list(existing_files.keys()):
+ # FIXME handle both subdir starting with BP and not?
+ fworkpath = os.path.join(workdir, fname)
+ if fworkpath.startswith(s):
+ fpath = os.path.join(srctree, os.path.relpath(fworkpath, s))
+ if os.path.exists(fpath):
+ origpath = existing_files.pop(fname)
+ if not filecmp.cmp(origpath, fpath):
+ updated[fpath] = origpath
+
+ removed = existing_files
+ return (updated, added, removed)
+
+
+def _determine_files_dir(rd):
+ """Determine the appropriate files directory for a recipe"""
+ recipedir = rd.getVar('FILE_DIRNAME')
+ for entry in rd.getVar('FILESPATH').split(':'):
+ relpth = os.path.relpath(entry, recipedir)
+ if not os.sep in relpth:
+ # One (or zero) levels below only, so we don't put anything in machine-specific directories
+ if os.path.isdir(entry):
+ return entry
+ return os.path.join(recipedir, rd.getVar('BPN'))
+
+
+def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, no_report_remove, dry_run_outdir=None):
+ """Implement the 'srcrev' mode of update-recipe"""
+ import bb
+ import oe.recipeutils
+
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+
+ recipefile = rd.getVar('FILE')
+    recipedir = os.path.dirname(recipefile)
+    logger.info('Updating SRCREV in recipe %s%s' % (os.path.basename(recipefile), dry_run_suffix))
+
+ # Get HEAD revision
+ try:
+ stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ except bb.process.ExecutionError as err:
+ raise DevtoolError('Failed to get HEAD revision in %s: %s' %
+ (srctree, err))
+ srcrev = stdout.strip()
+ if len(srcrev) != 40:
+ raise DevtoolError('Invalid hash returned by git: %s' % stdout)
+
+ destpath = None
+ remove_files = []
+ patchfields = {}
+ patchfields['SRCREV'] = srcrev
+ orig_src_uri = rd.getVar('SRC_URI', False) or ''
+ srcuri = orig_src_uri.split()
+ tempdir = tempfile.mkdtemp(prefix='devtool')
+ update_srcuri = False
+ appendfile = None
+ try:
+ local_files_dir = tempfile.mkdtemp(dir=tempdir)
+ srctreebase = workspace[recipename]['srctreebase']
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
+ if not no_remove:
+ # Find list of existing patches in recipe file
+ patches_dir = tempfile.mkdtemp(dir=tempdir)
+ old_srcrev = rd.getVar('SRCREV') or ''
+ upd_p, new_p, del_p = _export_patches(srctree, rd, old_srcrev,
+ patches_dir)
+ logger.debug('Patches: update %s, new %s, delete %s' % (dict(upd_p), dict(new_p), dict(del_p)))
+
+ # Remove deleted local files and "overlapping" patches
+ remove_files = list(del_f.values()) + list(upd_p.values()) + list(del_p.values())
+ if remove_files:
+ removedentries = _remove_file_entries(srcuri, remove_files)[0]
+ update_srcuri = True
+
+ if appendlayerdir:
+ files = dict((os.path.join(local_files_dir, key), val) for
+ key, val in list(upd_f.items()) + list(new_f.items()))
+ removevalues = {}
+ if update_srcuri:
+ removevalues = {'SRC_URI': removedentries}
+ patchfields['SRC_URI'] = '\\\n '.join(srcuri)
+ if dry_run_outdir:
+ logger.info('Creating bbappend (dry-run)')
+ else:
+ appendfile, destpath = oe.recipeutils.bbappend_recipe(
+ rd, appendlayerdir, files, wildcardver=wildcard_version,
+ extralines=patchfields, removevalues=removevalues,
+ redirect_output=dry_run_outdir)
+ else:
+ files_dir = _determine_files_dir(rd)
+ for basepath, path in upd_f.items():
+ logger.info('Updating file %s%s' % (basepath, dry_run_suffix))
+ if os.path.isabs(basepath):
+ # Original file (probably with subdir pointing inside source tree)
+ # so we do not want to move it, just copy
+ _copy_file(basepath, path, dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
+ else:
+ _move_file(os.path.join(local_files_dir, basepath), path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
+                update_srcuri = True
+ for basepath, path in new_f.items():
+ logger.info('Adding new file %s%s' % (basepath, dry_run_suffix))
+ _move_file(os.path.join(local_files_dir, basepath),
+ os.path.join(files_dir, basepath),
+ dry_run_outdir=dry_run_outdir,
+ base_outdir=recipedir)
+ srcuri.append('file://%s' % basepath)
+ update_srcuri = True
+ if update_srcuri:
+ patchfields['SRC_URI'] = ' '.join(srcuri)
+ ret = oe.recipeutils.patch_recipe(rd, recipefile, patchfields, redirect_output=dry_run_outdir)
+ finally:
+ shutil.rmtree(tempdir)
+ if not 'git://' in orig_src_uri:
+ logger.info('You will need to update SRC_URI within the recipe to '
+ 'point to a git repository where you have pushed your '
+ 'changes')
+
+ _remove_source_files(appendlayerdir, remove_files, destpath, no_report_remove, dry_run=dry_run_outdir)
+ return True, appendfile, remove_files
+
+def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wildcard_version, no_remove, no_report_remove, initial_rev, dry_run_outdir=None, force_patch_refresh=False):
+ """Implement the 'patch' mode of update-recipe"""
+ import bb
+ import oe.recipeutils
+
+ recipefile = rd.getVar('FILE')
+ recipedir = os.path.dirname(recipefile)
+ append = workspace[recipename]['bbappend']
+ if not os.path.exists(append):
+ raise DevtoolError('unable to find workspace bbappend for recipe %s' %
+ recipename)
+
+ initial_rev, update_rev, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
+ if not initial_rev:
+ raise DevtoolError('Unable to find initial revision - please specify '
+ 'it with --initial-rev')
+
+ appendfile = None
+ dl_dir = rd.getVar('DL_DIR')
+ if not dl_dir.endswith('/'):
+ dl_dir += '/'
+
+ dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
+
+ tempdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ local_files_dir = tempfile.mkdtemp(dir=tempdir)
+ if filter_patches:
+ upd_f = {}
+ new_f = {}
+ del_f = {}
+ else:
+ srctreebase = workspace[recipename]['srctreebase']
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
+
+ remove_files = []
+ if not no_remove:
+ # Get all patches from source tree and check if any should be removed
+ all_patches_dir = tempfile.mkdtemp(dir=tempdir)
+ _, _, del_p = _export_patches(srctree, rd, initial_rev,
+ all_patches_dir)
+ # Remove deleted local files and patches
+ remove_files = list(del_f.values()) + list(del_p.values())
+
+ # Get updated patches from source tree
+ patches_dir = tempfile.mkdtemp(dir=tempdir)
+ upd_p, new_p, _ = _export_patches(srctree, rd, update_rev,
+ patches_dir, changed_revs)
+ logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
+ if filter_patches:
+ new_p = {}
+ upd_p = {k:v for k,v in upd_p.items() if k in filter_patches}
+ remove_files = [f for f in remove_files if f in filter_patches]
+ updatefiles = False
+ updaterecipe = False
+ destpath = None
+ srcuri = (rd.getVar('SRC_URI', False) or '').split()
+ if appendlayerdir:
+ files = dict((os.path.join(local_files_dir, key), val) for
+ key, val in list(upd_f.items()) + list(new_f.items()))
+ files.update(dict((os.path.join(patches_dir, key), val) for
+ key, val in list(upd_p.items()) + list(new_p.items())))
+ if files or remove_files:
+ removevalues = None
+ if remove_files:
+ removedentries, remaining = _remove_file_entries(
+ srcuri, remove_files)
+ if removedentries or remaining:
+ remaining = ['file://' + os.path.basename(item) for
+ item in remaining]
+ removevalues = {'SRC_URI': removedentries + remaining}
+ appendfile, destpath = oe.recipeutils.bbappend_recipe(
+ rd, appendlayerdir, files,
+ wildcardver=wildcard_version,
+ removevalues=removevalues,
+ redirect_output=dry_run_outdir)
+ else:
+ logger.info('No patches or local source files needed updating')
+ else:
+ # Update existing files
+ files_dir = _determine_files_dir(rd)
+ for basepath, path in upd_f.items():
+ logger.info('Updating file %s' % basepath)
+ if os.path.isabs(basepath):
+ # Original file (probably with subdir pointing inside source tree)
+ # so we do not want to move it, just copy
+ _copy_file(basepath, path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
+ else:
+ _move_file(os.path.join(local_files_dir, basepath), path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
+ updatefiles = True
+ for basepath, path in upd_p.items():
+ patchfn = os.path.join(patches_dir, basepath)
+ if os.path.dirname(path) + '/' == dl_dir:
+                    # This is a downloaded patch file - we now need to
+ # replace the entry in SRC_URI with our local version
+ logger.info('Replacing remote patch %s with updated local version' % basepath)
+ path = os.path.join(files_dir, basepath)
+ _replace_srcuri_entry(srcuri, basepath, 'file://%s' % basepath)
+ updaterecipe = True
+ else:
+ logger.info('Updating patch %s%s' % (basepath, dry_run_suffix))
+ _move_file(patchfn, path,
+ dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
+ updatefiles = True
+ # Add any new files
+ for basepath, path in new_f.items():
+ logger.info('Adding new file %s%s' % (basepath, dry_run_suffix))
+ _move_file(os.path.join(local_files_dir, basepath),
+ os.path.join(files_dir, basepath),
+ dry_run_outdir=dry_run_outdir,
+ base_outdir=recipedir)
+ srcuri.append('file://%s' % basepath)
+ updaterecipe = True
+ for basepath, path in new_p.items():
+ logger.info('Adding new patch %s%s' % (basepath, dry_run_suffix))
+ _move_file(os.path.join(patches_dir, basepath),
+ os.path.join(files_dir, basepath),
+ dry_run_outdir=dry_run_outdir,
+ base_outdir=recipedir)
+ srcuri.append('file://%s' % basepath)
+ updaterecipe = True
+ # Update recipe, if needed
+ if _remove_file_entries(srcuri, remove_files)[0]:
+ updaterecipe = True
+ if updaterecipe:
+ if not dry_run_outdir:
+ logger.info('Updating recipe %s' % os.path.basename(recipefile))
+ ret = oe.recipeutils.patch_recipe(rd, recipefile,
+ {'SRC_URI': ' '.join(srcuri)},
+ redirect_output=dry_run_outdir)
+ elif not updatefiles:
+ # Neither patches nor recipe were updated
+ logger.info('No patches or files need updating')
+ return False, None, []
+ finally:
+ shutil.rmtree(tempdir)
+
+ _remove_source_files(appendlayerdir, remove_files, destpath, no_report_remove, dry_run=dry_run_outdir)
+ return True, appendfile, remove_files
+
+def _guess_recipe_update_mode(srctree, rdata):
+ """Guess the recipe update mode to use"""
+ src_uri = (rdata.getVar('SRC_URI', False) or '').split()
+ git_uris = [uri for uri in src_uri if uri.startswith('git://')]
+ if not git_uris:
+ return 'patch'
+ # Just use the first URI for now
+ uri = git_uris[0]
+ # Check remote branch
+ params = bb.fetch.decodeurl(uri)[5]
+ upstr_branch = params['branch'] if 'branch' in params else 'master'
+ # Check if current branch HEAD is found in upstream branch
+ stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ head_rev = stdout.rstrip()
+ stdout, _ = bb.process.run('git branch -r --contains %s' % head_rev,
+ cwd=srctree)
+ remote_brs = [branch.strip() for branch in stdout.splitlines()]
+ if 'origin/' + upstr_branch in remote_brs:
+ return 'srcrev'
+
+ return 'patch'
+
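+# For illustration only (hypothetical URI): with
+#   SRC_URI = "git://example.com/repo.git;branch=main"
+# bb.fetch.decodeurl(uri)[5] yields the parameter dict {'branch': 'main'};
+# if the source tree's HEAD commit is already contained in origin/main then
+# 'srcrev' mode is chosen (only SRCREV needs bumping), otherwise 'patch'.
+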
+def _update_recipe(recipename, workspace, rd, mode, appendlayerdir, wildcard_version, no_remove, initial_rev, no_report_remove=False, dry_run_outdir=None, no_overrides=False, force_patch_refresh=False):
+ srctree = workspace[recipename]['srctree']
+ if mode == 'auto':
+ mode = _guess_recipe_update_mode(srctree, rd)
+
+ override_branches = []
+ mainbranch = None
+ startbranch = None
+ if not no_overrides:
+ stdout, _ = bb.process.run('git branch', cwd=srctree)
+ other_branches = []
+ for line in stdout.splitlines():
+ branchname = line[2:]
+ if line.startswith('* '):
+ startbranch = branchname
+ if branchname.startswith(override_branch_prefix):
+ override_branches.append(branchname)
+ else:
+ other_branches.append(branchname)
+
+ if override_branches:
+ logger.debug('_update_recipe: override branches: %s' % override_branches)
+ logger.debug('_update_recipe: other branches: %s' % other_branches)
+ if startbranch.startswith(override_branch_prefix):
+ if len(other_branches) == 1:
+                    mainbranch = other_branches[0]
+ else:
+ raise DevtoolError('Unable to determine main branch - please check out the main branch in source tree first')
+ else:
+ mainbranch = startbranch
+
+ checkedout = None
+ anyupdated = False
+ appendfile = None
+ allremoved = []
+ if override_branches:
+ logger.info('Handling main branch (%s)...' % mainbranch)
+ if startbranch != mainbranch:
+ bb.process.run('git checkout %s' % mainbranch, cwd=srctree)
+ checkedout = mainbranch
+ try:
+ branchlist = [mainbranch] + override_branches
+ for branch in branchlist:
+ crd = bb.data.createCopy(rd)
+ if branch != mainbranch:
+ logger.info('Handling branch %s...' % branch)
+ override = branch[len(override_branch_prefix):]
+ crd.appendVar('OVERRIDES', ':%s' % override)
+ bb.process.run('git checkout %s' % branch, cwd=srctree)
+ checkedout = branch
+
+ if mode == 'srcrev':
+ updated, appendf, removed = _update_recipe_srcrev(recipename, workspace, srctree, crd, appendlayerdir, wildcard_version, no_remove, no_report_remove, dry_run_outdir)
+ elif mode == 'patch':
+ updated, appendf, removed = _update_recipe_patch(recipename, workspace, srctree, crd, appendlayerdir, wildcard_version, no_remove, no_report_remove, initial_rev, dry_run_outdir, force_patch_refresh)
+ else:
+ raise DevtoolError('update_recipe: invalid mode %s' % mode)
+ if updated:
+ anyupdated = True
+ if appendf:
+ appendfile = appendf
+ allremoved.extend(removed)
+ finally:
+ if startbranch and checkedout != startbranch:
+ bb.process.run('git checkout %s' % startbranch, cwd=srctree)
+
+ return anyupdated, appendfile, allremoved
+
+def update_recipe(args, config, basepath, workspace):
+ """Entry point for the devtool 'update-recipe' subcommand"""
+ check_workspace_recipe(workspace, args.recipename)
+
+ if args.append:
+ if not os.path.exists(args.append):
+ raise DevtoolError('bbappend destination layer directory "%s" '
+ 'does not exist' % args.append)
+ if not os.path.exists(os.path.join(args.append, 'conf', 'layer.conf')):
+ raise DevtoolError('conf/layer.conf not found in bbappend '
+ 'destination layer "%s"' % args.append)
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ dry_run_output = None
+ dry_run_outdir = None
+ if args.dry_run:
+ dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
+ dry_run_outdir = dry_run_output.name
+ updated, _, _ = _update_recipe(args.recipename, workspace, rd, args.mode, args.append, args.wildcard_version, args.no_remove, args.initial_rev, dry_run_outdir=dry_run_outdir, no_overrides=args.no_overrides, force_patch_refresh=args.force_patch_refresh)
+
+ if updated:
+ rf = rd.getVar('FILE')
+ if rf.startswith(config.workspace_path):
+ logger.warn('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
+ finally:
+ tinfoil.shutdown()
+
+ return 0
+
+
+def status(args, config, basepath, workspace):
+ """Entry point for the devtool 'status' subcommand"""
+ if workspace:
+ for recipe, value in sorted(workspace.items()):
+ recipefile = value['recipefile']
+ if recipefile:
+ recipestr = ' (%s)' % recipefile
+ else:
+ recipestr = ''
+ print("%s: %s%s" % (recipe, value['srctree'], recipestr))
+ else:
+ logger.info('No recipes currently in your workspace - you can use "devtool modify" to work on an existing recipe or "devtool add" to add a new one')
+ return 0
+
+
+def _reset(recipes, no_clean, config, basepath, workspace):
+ """Reset one or more recipes"""
+ import oe.path
+
+ def clean_preferred_provider(pn, layerconf_path):
+        """Remove PREFERRED_PROVIDER from layer.conf"""
+ import re
+ layerconf_file = os.path.join(layerconf_path, 'conf', 'layer.conf')
+ new_layerconf_file = os.path.join(layerconf_path, 'conf', '.layer.conf')
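+ # Rewrite layer.conf through a temporary file, filtering out any
+ # PREFERRED_PROVIDER line for this recipe; the new file is only moved
+ # into place if such a line was actually found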
+ pprovider_found = False
+ with open(layerconf_file, 'r') as f:
+ lines = f.readlines()
+ with open(new_layerconf_file, 'a') as nf:
+ for line in lines:
+ pprovider_exp = r'^PREFERRED_PROVIDER_.*? = "' + pn + r'"$'
+ if not re.match(pprovider_exp, line):
+ nf.write(line)
+ else:
+ pprovider_found = True
+ if pprovider_found:
+ shutil.move(new_layerconf_file, layerconf_file)
+ else:
+ os.remove(new_layerconf_file)
+
+ if recipes and not no_clean:
+ if len(recipes) == 1:
+ logger.info('Cleaning sysroot for recipe %s...' % recipes[0])
+ else:
+ logger.info('Cleaning sysroot for recipes %s...' % ', '.join(recipes))
+ # If the recipe file itself was created in the workspace, and
+ # it uses BBCLASSEXTEND, then we need to also clean the other
+ # variants
+ targets = []
+ for recipe in recipes:
+ targets.append(recipe)
+ recipefile = workspace[recipe]['recipefile']
+ if recipefile and os.path.exists(recipefile):
+ targets.extend(get_bbclassextend_targets(recipefile, recipe))
+ try:
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c clean %s' % ' '.join(targets))
+ except bb.process.ExecutionError as e:
+ raise DevtoolError('Command \'%s\' failed, output:\n%s\nIf you '
+ 'wish, you may specify -n/--no-clean to '
+ 'skip running this command when resetting' %
+ (e.command, e.stdout))
+
+ for pn in recipes:
+ _check_preserve(config, pn)
+
+ appendfile = workspace[pn]['bbappend']
+ if os.path.exists(appendfile):
+ # This shouldn't happen, but is possible if devtool errored out prior to
+ # writing the md5 file. We need to delete this here or the recipe won't
+ # actually be reset
+ os.remove(appendfile)
+
+ preservepath = os.path.join(config.workspace_path, 'attic', pn, pn)
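+ # Recursively move any remaining files under origdir into the
+ # 'attic' directory within the workspace so they are not lost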
+ def preservedir(origdir):
+ if os.path.exists(origdir):
+ for root, dirs, files in os.walk(origdir):
+ for fn in files:
+ logger.warn('Preserving %s in %s' % (fn, preservepath))
+ _move_file(os.path.join(origdir, fn),
+ os.path.join(preservepath, fn))
+ for dn in dirs:
+ preservedir(os.path.join(root, dn))
+ os.rmdir(origdir)
+
+ recipefile = workspace[pn]['recipefile']
+ if recipefile and oe.path.is_path_parent(config.workspace_path, recipefile):
+ # This should always be true if recipefile is set, but just in case
+ preservedir(os.path.dirname(recipefile))
+ # We don't automatically create this dir next to appends, but the user can
+ preservedir(os.path.join(config.workspace_path, 'appends', pn))
+
+ srctreebase = workspace[pn]['srctreebase']
+ if os.path.isdir(srctreebase):
+ if os.listdir(srctreebase):
+ # We don't want to risk wiping out any work in progress
+ logger.info('Leaving source tree %s as-is; if you no '
+ 'longer need it then please delete it manually'
+ % srctreebase)
+ else:
+ # This is unlikely, but if it's empty we can just remove it
+ os.rmdir(srctreebase)
+
+ clean_preferred_provider(pn, config.workspace_path)
+
+def reset(args, config, basepath, workspace):
+ """Entry point for the devtool 'reset' subcommand"""
+ import bb
+ if args.recipename:
+ if args.all:
+ raise DevtoolError("Recipe cannot be specified if -a/--all is used")
+ else:
+ for recipe in args.recipename:
+ check_workspace_recipe(workspace, recipe, checksrc=False)
+ elif not args.all:
+ raise DevtoolError("Recipe must be specified, or specify -a/--all to "
+ "reset all recipes")
+ if args.all:
+ recipes = list(workspace.keys())
+ else:
+ recipes = args.recipename
+
+ _reset(recipes, args.no_clean, config, basepath, workspace)
+
+ return 0
+
+
+def _get_layer(layername, d):
+ """Determine the base layer path for the specified layer name/path"""
+ layerdirs = d.getVar('BBLAYERS').split()
+ layers = {os.path.basename(p): p for p in layerdirs}
+ # Provide some shortcuts
+ if layername.lower() in ['oe-core', 'openembedded-core']:
+ layerdir = layers.get('meta', None)
+ else:
+ layerdir = layers.get(layername, None)
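+ # Fall back to treating the specified name as a path if it does not
+ # match any configured layer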
+ return os.path.abspath(layerdir or layername)
+
+def finish(args, config, basepath, workspace):
+ """Entry point for the devtool 'finish' subcommand"""
+ import bb
+ import oe.recipeutils
+
+ check_workspace_recipe(workspace, args.recipename)
+
+ dry_run_suffix = ' (dry-run)' if args.dry_run else ''
+
+ # Grab the equivalent of COREBASE without having to initialise tinfoil
+ corebasedir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+
+ srctree = workspace[args.recipename]['srctree']
+ check_git_repo_op(srctree, [corebasedir])
+ dirty = check_git_repo_dirty(srctree)
+ if dirty:
+ if args.force:
+ logger.warning('Source tree is not clean, continuing as requested by -f/--force')
+ else:
+ raise DevtoolError('Source tree is not clean:\n\n%s\nEnsure you have committed your changes or use -f/--force if you are sure there\'s nothing that needs to be committed' % dirty)
+
+ no_clean = False
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ destlayerdir = _get_layer(args.destination, tinfoil.config_data)
+ recipefile = rd.getVar('FILE')
+ recipedir = os.path.dirname(recipefile)
+ origlayerdir = oe.recipeutils.find_layerdir(recipefile)
+
+ if not os.path.isdir(destlayerdir):
+ raise DevtoolError('Unable to find layer or directory matching "%s"' % args.destination)
+
+ if os.path.abspath(destlayerdir) == config.workspace_path:
+ raise DevtoolError('"%s" specifies the workspace layer - that is not a valid destination' % args.destination)
+
+ # If it's an upgrade, grab the original path
+ origpath = None
+ origfilelist = None
+ append = workspace[args.recipename]['bbappend']
+ with open(append, 'r') as f:
+ for line in f:
+ if line.startswith('# original_path:'):
+ origpath = line.split(':')[1].strip()
+ elif line.startswith('# original_files:'):
+ origfilelist = line.split(':')[1].split()
+
+ destlayerbasedir = oe.recipeutils.find_layerdir(destlayerdir)
+
+ if origlayerdir == config.workspace_path:
+ # Recipe file itself is in workspace, update it there first
+ appendlayerdir = None
+ origrelpath = None
+ if origpath:
+ origlayerpath = oe.recipeutils.find_layerdir(origpath)
+ if origlayerpath:
+ origrelpath = os.path.relpath(origpath, origlayerpath)
+ destpath = oe.recipeutils.get_bbfile_path(rd, destlayerdir, origrelpath)
+ if not destpath:
+ raise DevtoolError("Unable to determine destination layer path - check that %s specifies an actual layer and %s/conf/layer.conf specifies BBFILES. You may also need to specify a more complete path." % (args.destination, destlayerdir))
+ # Warn if the layer isn't in bblayers.conf (the code to create a bbappend will do this in other cases)
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
+ if not os.path.abspath(destlayerbasedir) in layerdirs:
+ bb.warn('Specified destination layer is not currently enabled in bblayers.conf, so the %s recipe will now be unavailable in your current configuration until you add the layer there' % args.recipename)
+
+ elif destlayerdir == origlayerdir:
+ # Same layer, update the original recipe
+ appendlayerdir = None
+ destpath = None
+ else:
+ # Create/update a bbappend in the specified layer
+ appendlayerdir = destlayerdir
+ destpath = None
+
+ # Actually update the recipe / bbappend
+ removing_original = (origpath and origfilelist and oe.recipeutils.find_layerdir(origpath) == destlayerbasedir)
+ dry_run_output = None
+ dry_run_outdir = None
+ if args.dry_run:
+ dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
+ dry_run_outdir = dry_run_output.name
+ updated, appendfile, removed = _update_recipe(args.recipename, workspace, rd, args.mode, appendlayerdir, wildcard_version=True, no_remove=False, no_report_remove=removing_original, initial_rev=args.initial_rev, dry_run_outdir=dry_run_outdir, no_overrides=args.no_overrides, force_patch_refresh=args.force_patch_refresh)
+ removed = [os.path.relpath(pth, recipedir) for pth in removed]
+
+ # Remove any old files in the case of an upgrade
+ if removing_original:
+ for fn in origfilelist:
+ fnp = os.path.join(origpath, fn)
+ if fn in removed or not os.path.exists(os.path.join(recipedir, fn)):
+ logger.info('Removing file %s%s' % (fnp, dry_run_suffix))
+ if not args.dry_run:
+ try:
+ os.remove(fnp)
+ except FileNotFoundError:
+ pass
+
+ if origlayerdir == config.workspace_path and destpath:
+ # Recipe file itself is in the workspace - need to move it and any
+ # associated files to the specified layer
+ no_clean = True
+ logger.info('Moving recipe file to %s%s' % (destpath, dry_run_suffix))
+ for root, _, files in os.walk(recipedir):
+ for fn in files:
+ srcpath = os.path.join(root, fn)
+ relpth = os.path.relpath(os.path.dirname(srcpath), recipedir)
+ destdir = os.path.abspath(os.path.join(destpath, relpth))
+ destfp = os.path.join(destdir, fn)
+ _move_file(srcpath, destfp, dry_run_outdir=dry_run_outdir, base_outdir=destpath)
+
+ if dry_run_outdir:
+ import difflib
+ comparelist = []
+ for root, _, files in os.walk(dry_run_outdir):
+ for fn in files:
+ outf = os.path.join(root, fn)
+ relf = os.path.relpath(outf, dry_run_outdir)
+ logger.debug('dry-run: output file %s' % relf)
+ if fn.endswith('.bb'):
+ if origfilelist and origpath and destpath:
+ # Need to match this up with the pre-upgrade recipe file
+ for origf in origfilelist:
+ if origf.endswith('.bb'):
+ comparelist.append((os.path.abspath(os.path.join(origpath, origf)),
+ outf,
+ os.path.abspath(os.path.join(destpath, relf))))
+ break
+ else:
+ # Compare to the existing recipe
+ comparelist.append((recipefile, outf, recipefile))
+ elif fn.endswith('.bbappend'):
+ if appendfile:
+ if os.path.exists(appendfile):
+ comparelist.append((appendfile, outf, appendfile))
+ else:
+ comparelist.append((None, outf, appendfile))
+ else:
+ if destpath:
+ recipedest = destpath
+ elif appendfile:
+ recipedest = os.path.dirname(appendfile)
+ else:
+ recipedest = os.path.dirname(recipefile)
+ destfp = os.path.join(recipedest, relf)
+ if os.path.exists(destfp):
+ comparelist.append((destfp, outf, destfp))
+ output = ''
+ for oldfile, newfile, newfileshow in comparelist:
+ if oldfile:
+ with open(oldfile, 'r') as f:
+ oldlines = f.readlines()
+ else:
+ oldfile = '/dev/null'
+ oldlines = []
+ with open(newfile, 'r') as f:
+ newlines = f.readlines()
+ if not newfileshow:
+ newfileshow = newfile
+ diff = difflib.unified_diff(oldlines, newlines, oldfile, newfileshow)
+ difflines = list(diff)
+ if difflines:
+ output += ''.join(difflines)
+ if output:
+ logger.info('Diff of changed files:\n%s' % output)
+ finally:
+ tinfoil.shutdown()
+
+ # Everything else has succeeded, we can now reset
+ if args.dry_run:
+ logger.info('Resetting recipe (dry-run)')
+ else:
+ _reset([args.recipename], no_clean=no_clean, config=config, basepath=basepath, workspace=workspace)
+
+ return 0
+
+
+def get_default_srctree(config, recipename=''):
+ """Get the default srctree path"""
+ srctreeparent = config.get('General', 'default_source_parent_dir', config.workspace_path)
+ if recipename:
+ return os.path.join(srctreeparent, 'sources', recipename)
+ else:
+ return os.path.join(srctreeparent, 'sources')
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+
+ defsrctree = get_default_srctree(context.config)
+ parser_add = subparsers.add_parser('add', help='Add a new recipe',
+ description='Adds a new recipe to the workspace to build a specified source tree. Can optionally fetch a remote URI and unpack it to create the source tree.',
+ group='starting', order=100)
+ parser_add.add_argument('recipename', nargs='?', help='Name for new recipe to add (just name - no version, path or extension). If not specified, will attempt to auto-detect it.')
+ parser_add.add_argument('srctree', nargs='?', help='Path to external source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
+ parser_add.add_argument('fetchuri', nargs='?', help='Fetch the specified URI and extract it to create the source tree')
+ group = parser_add.add_mutually_exclusive_group()
+ group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
+ group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
+ parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
+ parser_add.add_argument('--fetch-dev', help='For npm, also fetch devDependencies', action="store_true")
+ parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
+ parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
+ group = parser_add.add_mutually_exclusive_group()
+ group.add_argument('--srcrev', '-S', help='Source revision to fetch if fetching from an SCM such as git (default latest)')
+ group.add_argument('--autorev', '-a', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
+ parser_add.add_argument('--srcbranch', '-B', help='Branch in source repository if fetching from an SCM such as git (default master)')
+ parser_add.add_argument('--binary', '-b', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure). Useful with binary packages e.g. RPMs.', action='store_true')
+ parser_add.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
+ parser_add.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
+ parser_add.add_argument('--mirrors', help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).', action="store_true")
+ parser_add.add_argument('--provides', '-p', help='Specify an alias for the item provided by the recipe. E.g. virtual/libgl')
+ parser_add.set_defaults(func=add, fixed_setup=context.fixed_setup)
+
+ parser_modify = subparsers.add_parser('modify', help='Modify the source for an existing recipe',
+ description='Sets up the build environment to modify the source for an existing recipe. The default behaviour is to extract the source being fetched by the recipe into a git tree so you can work on it; alternatively if you already have your own pre-prepared source tree you can specify -n/--no-extract.',
+ group='starting', order=90)
+ parser_modify.add_argument('recipename', help='Name of existing recipe to edit (just name - no version, path or extension)')
+ parser_modify.add_argument('srctree', nargs='?', help='Path to external source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
+ parser_modify.add_argument('--wildcard', '-w', action="store_true", help='Use wildcard for unversioned bbappend')
+ group = parser_modify.add_mutually_exclusive_group()
+ group.add_argument('--extract', '-x', action="store_true", help='Extract source for recipe (default)')
+ group.add_argument('--no-extract', '-n', action="store_true", help='Do not extract source, expect it to exist')
+ group = parser_modify.add_mutually_exclusive_group()
+ group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
+ group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
+ parser_modify.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout (when not using -n/--no-extract) (default "%(default)s")')
+ parser_modify.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
+ parser_modify.add_argument('--keep-temp', help='Keep temporary directory (for debugging)', action="store_true")
+ parser_modify.set_defaults(func=modify, fixed_setup=context.fixed_setup)
+
+ parser_extract = subparsers.add_parser('extract', help='Extract the source for an existing recipe',
+ description='Extracts the source for an existing recipe',
+ group='advanced')
+ parser_extract.add_argument('recipename', help='Name of recipe to extract the source for')
+ parser_extract.add_argument('srctree', help='Path to where to extract the source tree')
+ parser_extract.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout (default "%(default)s")')
+ parser_extract.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
+ parser_extract.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_extract.set_defaults(func=extract, fixed_setup=context.fixed_setup)
+
+ parser_sync = subparsers.add_parser('sync', help='Synchronize the source tree for an existing recipe',
+ description='Synchronize the previously extracted source tree for an existing recipe',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ group='advanced')
+ parser_sync.add_argument('recipename', help='Name of recipe to sync the source for')
+ parser_sync.add_argument('srctree', help='Path to the source tree')
+ parser_sync.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout')
+ parser_sync.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_sync.set_defaults(func=sync, fixed_setup=context.fixed_setup)
+
+ parser_rename = subparsers.add_parser('rename', help='Rename a recipe file in the workspace',
+ description='Renames the recipe file for a recipe in the workspace, changing the name or version part or both, ensuring that all references within the workspace are updated at the same time. Only works when the recipe file itself is in the workspace, e.g. after devtool add. Particularly useful when devtool add did not automatically determine the correct name.',
+ group='working', order=10)
+ parser_rename.add_argument('recipename', help='Current name of recipe to rename')
+ parser_rename.add_argument('newname', nargs='?', help='New name for recipe (optional, not needed if you only want to change the version)')
+ parser_rename.add_argument('--version', '-V', help='Change the version (NOTE: this does not change the version fetched by the recipe, just the version in the recipe file name)')
+ parser_rename.add_argument('--no-srctree', '-s', action='store_true', help='Do not rename the source tree directory (if the default source tree path has been used) - keeping the old name may be desirable if there are internal/other external references to this path')
+ parser_rename.set_defaults(func=rename)
+
+ parser_update_recipe = subparsers.add_parser('update-recipe', help='Apply changes from external source tree to recipe',
+ description='Applies changes from external source tree to a recipe (updating/adding/removing patches as necessary, or by updating SRCREV). Note that these changes need to have been committed to the git repository in order to be recognised.',
+ group='working', order=-90)
+ parser_update_recipe.add_argument('recipename', help='Name of recipe to update')
+ parser_update_recipe.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
+ parser_update_recipe.add_argument('--initial-rev', help='Override starting revision for patches')
+ parser_update_recipe.add_argument('--append', '-a', help='Write changes to a bbappend in the specified layer instead of the recipe', metavar='LAYERDIR')
+ parser_update_recipe.add_argument('--wildcard-version', '-w', help='In conjunction with -a/--append, use a wildcard to make the bbappend apply to any recipe version', action='store_true')
+ parser_update_recipe.add_argument('--no-remove', '-n', action="store_true", help='Don\'t remove patches, only add or update')
+ parser_update_recipe.add_argument('--no-overrides', '-O', action="store_true", help='Do not handle other override branches (if they exist)')
+ parser_update_recipe.add_argument('--dry-run', '-N', action="store_true", help='Dry-run (just report changes instead of writing them)')
+ parser_update_recipe.add_argument('--force-patch-refresh', action="store_true", help='Update patches in the layer even if they have not been modified (useful for refreshing patch context)')
+ parser_update_recipe.set_defaults(func=update_recipe)
+
+ parser_status = subparsers.add_parser('status', help='Show workspace status',
+ description='Lists recipes currently in your workspace and the paths to their respective external source trees',
+ group='info', order=100)
+ parser_status.set_defaults(func=status)
+
+ parser_reset = subparsers.add_parser('reset', help='Remove a recipe from your workspace',
+ description='Removes the specified recipe(s) from your workspace (resetting its state back to that defined by the metadata).',
+ group='working', order=-100)
+ parser_reset.add_argument('recipename', nargs='*', help='Recipe to reset')
+ parser_reset.add_argument('--all', '-a', action="store_true", help='Reset all recipes (clear workspace)')
+ parser_reset.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
+ parser_reset.set_defaults(func=reset)
+
+ parser_finish = subparsers.add_parser('finish', help='Finish working on a recipe in your workspace',
+ description='Pushes any committed changes to the specified recipe to the specified layer and removes it from your workspace. Roughly equivalent to an update-recipe followed by reset, except the update-recipe step will do the "right thing" depending on the recipe and the destination layer specified. Note that your changes must have been committed to the git repository in order to be recognised.',
+ group='working', order=-100)
+ parser_finish.add_argument('recipename', help='Recipe to finish')
+ parser_finish.add_argument('destination', help='Layer/path to put recipe into. Can be the name of a layer configured in your bblayers.conf, the path to the base of a layer, or a partial path inside a layer. %(prog)s will attempt to complete the path based on the layer\'s structure.')
+ parser_finish.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
+ parser_finish.add_argument('--initial-rev', help='Override starting revision for patches')
+ parser_finish.add_argument('--force', '-f', action="store_true", help='Force continuing even if there are uncommitted changes in the source tree repository')
+ parser_finish.add_argument('--no-overrides', '-O', action="store_true", help='Do not handle other override branches (if they exist)')
+ parser_finish.add_argument('--dry-run', '-N', action="store_true", help='Dry-run (just report changes instead of writing them)')
+ parser_finish.add_argument('--force-patch-refresh', action="store_true", help='Update patches in the layer even if they have not been modified (useful for refreshing patch context)')
+ parser_finish.set_defaults(func=finish)
diff --git a/poky/scripts/lib/devtool/upgrade.py b/poky/scripts/lib/devtool/upgrade.py
new file mode 100644
index 000000000..c3fd866ef
--- /dev/null
+++ b/poky/scripts/lib/devtool/upgrade.py
@@ -0,0 +1,626 @@
+# Development tool - upgrade command plugin
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+"""Devtool upgrade plugin"""
+
+import os
+import sys
+import re
+import shutil
+import tempfile
+import logging
+import argparse
+import scriptutils
+import errno
+import bb
+import bb.ui.knotty
+
+devtool_path = os.path.dirname(os.path.realpath(__file__)) + '/../../../meta/lib'
+sys.path = sys.path + [devtool_path]
+
+import oe.recipeutils
+import oe.patch
+from devtool import standard
+from devtool import exec_build_env_command, setup_tinfoil, DevtoolError, parse_recipe, use_external_build, update_unlockedsigs, check_prerelease_version
+
+logger = logging.getLogger('devtool')
+
+def _run(cmd, cwd=''):
+ logger.debug("Running command %s> %s" % (cwd,cmd))
+ return bb.process.run('%s' % cmd, cwd=cwd)
+
+def _get_srctree(tmpdir):
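+ # If the fetched source unpacked into a single top-level directory
+ # (as most release tarballs do), use that directory as the source tree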
+ srctree = tmpdir
+ dirs = os.listdir(tmpdir)
+ if len(dirs) == 1:
+ srctree = os.path.join(tmpdir, dirs[0])
+ return srctree
+
+def _copy_source_code(orig, dest):
+ for path in standard._ls_tree(orig):
+ dest_dir = os.path.join(dest, os.path.dirname(path))
+ bb.utils.mkdirhier(dest_dir)
+ dest_path = os.path.join(dest, path)
+ shutil.move(os.path.join(orig, path), dest_path)
+
+def _remove_patch_dirs(recipefolder):
+ for root, dirs, files in os.walk(recipefolder):
+ for d in dirs:
+ shutil.rmtree(os.path.join(root,d))
+
+def _recipe_contains(rd, var):
+ rf = rd.getVar('FILE')
+ varfiles = oe.recipeutils.get_var_files(rf, [var], rd)
+ for var, fn in varfiles.items():
+ if fn and fn.startswith(os.path.dirname(rf) + os.sep):
+ return True
+ return False
+
+def _rename_recipe_dirs(oldpv, newpv, path):
+ for root, dirs, files in os.walk(path):
+ # Rename directories with the version in their name
+ for olddir in dirs:
+ if olddir.find(oldpv) != -1:
+ newdir = olddir.replace(oldpv, newpv)
+ if olddir != newdir:
+ shutil.move(os.path.join(path, olddir), os.path.join(path, newdir))
+ # Rename any inc files with the version in their name (unusual, but possible)
+ for oldfile in files:
+ if oldfile.endswith('.inc'):
+ if oldfile.find(oldpv) != -1:
+ newfile = oldfile.replace(oldpv, newpv)
+ if oldfile != newfile:
+ os.rename(os.path.join(path, oldfile), os.path.join(path, newfile))
+
+def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
+ oldrecipe = os.path.basename(oldrecipe)
+ if oldrecipe.endswith('_%s.bb' % oldpv):
+ newrecipe = '%s_%s.bb' % (bpn, newpv)
+ if oldrecipe != newrecipe:
+ shutil.move(os.path.join(path, oldrecipe), os.path.join(path, newrecipe))
+ else:
+ newrecipe = oldrecipe
+ return os.path.join(path, newrecipe)
+
+def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
+ _rename_recipe_dirs(oldpv, newpv, path)
+ return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
+
+def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
+ """Writes an append file"""
+ if not os.path.exists(rc):
+ raise DevtoolError("bbappend not created because %s does not exist" % rc)
+
+ appendpath = os.path.join(workspace, 'appends')
+ if not os.path.exists(appendpath):
+ bb.utils.mkdirhier(appendpath)
+
+ brf = os.path.basename(os.path.splitext(rc)[0]) # rc basename
+
+ srctree = os.path.abspath(srctree)
+ pn = d.getVar('PN')
+ af = os.path.join(appendpath, '%s.bbappend' % brf)
+ with open(af, 'w') as f:
+ f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
+ f.write('inherit externalsrc\n')
+ f.write(('# NOTE: We use pn- overrides here to avoid affecting '
+ 'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
+ f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ b_is_s = use_external_build(same_dir, no_same_dir, d)
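+ # If building within the source tree, point EXTERNALSRC_BUILD at the
+ # source tree as well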
+ if b_is_s:
+ f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('\n')
+ if rev:
+ f.write('# initial_rev: %s\n' % rev)
+ if copied:
+ f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
+ f.write('# original_files: %s\n' % ' '.join(copied))
+ return af
+
+def _cleanup_on_error(rf, srctree):
+ rfp = os.path.split(rf)[0] # recipe folder
+ rfpp = os.path.split(rfp)[0] # recipes folder
+ if os.path.exists(rfp):
+ shutil.rmtree(rfp)
+ if not len(os.listdir(rfpp)):
+ os.rmdir(rfpp)
+ srctree = os.path.abspath(srctree)
+ if os.path.exists(srctree):
+ shutil.rmtree(srctree)
+
+def _upgrade_error(e, rf, srctree):
+ if rf:
+ _cleanup_on_error(rf, srctree)
+ logger.error(e)
+ raise DevtoolError(e)
+
+def _get_uri(rd):
+ srcuris = rd.getVar('SRC_URI').split()
+ if not srcuris:
+ raise DevtoolError('SRC_URI not found in recipe')
+ # Get first non-local entry in SRC_URI - usually by convention it's
+ # the first entry, but not always!
+ srcuri = None
+ for entry in srcuris:
+ if not entry.startswith('file://'):
+ srcuri = entry
+ break
+ if not srcuri:
+ raise DevtoolError('Unable to find non-local entry in SRC_URI')
+ srcrev = '${AUTOREV}'
+ if '://' in srcuri:
+ # Fetch a URL
+ rev_re = re.compile(';rev=([^;]+)')
+ res = rev_re.search(srcuri)
+ if res:
+ srcrev = res.group(1)
+ srcuri = rev_re.sub('', srcuri)
+ return srcuri, srcrev
+
+def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, keep_temp, tinfoil, rd):
+ """Extract sources of a recipe with a new version"""
+
+ def __run(cmd):
+ """Simple wrapper which calls _run with srctree as cwd"""
+ return _run(cmd, srctree)
+
+ crd = rd.createCopy()
+
+ pv = crd.getVar('PV')
+ crd.setVar('PV', newpv)
+
+ tmpsrctree = None
+ uri, rev = _get_uri(crd)
+ if srcrev:
+ rev = srcrev
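+ # For git, fetch the new revision into the existing source tree;
+ # otherwise fetch and unpack the new release tarball further below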
+ if uri.startswith('git://'):
+ __run('git fetch')
+ __run('git checkout %s' % rev)
+ __run('git tag -f devtool-base-new')
+ md5 = None
+ sha256 = None
+ _, _, _, _, _, params = bb.fetch2.decodeurl(uri)
+ srcsubdir_rel = params.get('destsuffix', 'git')
+ if not srcbranch:
+ check_branch, check_branch_err = __run('git branch -r --contains %s' % srcrev)
+ get_branch = [x.strip() for x in check_branch.splitlines()]
+ # Remove HEAD reference point and drop remote prefix
+ get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
+ if 'master' in get_branch:
+ # If 'master' is one of the branches, we do not need to append
+ # 'branch=master' as it is the default - even when get_branch
+ # contains multiple branches, we default to taking from 'master'
+ srcbranch = ''
+ elif len(get_branch) == 1:
+ # If 'master' isn't in get_branch and get_branch contains only ONE
+ # branch, store that branch in 'srcbranch'
+ srcbranch = get_branch[0]
+ else:
+ # If get_branch contains more than one branch, display an error and exit
+ mbrch = '\n ' + '\n '.join(get_branch)
+ raise DevtoolError('Revision %s was found on multiple branches: %s\nPlease provide the correct branch in the devtool command with the "--srcbranch" or "-B" option.' % (srcrev, mbrch))
+ else:
+ __run('git checkout devtool-base -b devtool-%s' % newpv)
+
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ checksums, ftmpdir = scriptutils.fetch_url(tinfoil, uri, rev, tmpdir, logger, preserve_tmp=keep_temp)
+ except scriptutils.FetchUrlFailure as e:
+ raise DevtoolError(e)
+
+ if ftmpdir and keep_temp:
+ logger.info('Fetch temp directory is %s' % ftmpdir)
+
+ md5 = checksums['md5sum']
+ sha256 = checksums['sha256sum']
+
+ tmpsrctree = _get_srctree(tmpdir)
+ srctree = os.path.abspath(srctree)
+ srcsubdir_rel = os.path.relpath(tmpsrctree, tmpdir)
+
+ # Delete all sources so we ensure no stray files are left over
+ for item in os.listdir(srctree):
+ if item in ['.git', 'oe-local-files']:
+ continue
+ itempath = os.path.join(srctree, item)
+ if os.path.isdir(itempath):
+ shutil.rmtree(itempath)
+ else:
+ os.remove(itempath)
+
+ # Copy in new ones
+ _copy_source_code(tmpsrctree, srctree)
+
+ (stdout,_) = __run('git ls-files --modified --others --exclude-standard')
+ filelist = stdout.splitlines()
+ pbar = bb.ui.knotty.BBProgress('Adding changed files', len(filelist))
+ pbar.start()
+ batchsize = 100
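+ # Add the files in batches so that each git command line stays a
+ # manageable length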
+ for i in range(0, len(filelist), batchsize):
+ batch = filelist[i:i+batchsize]
+ __run('git add -A %s' % ' '.join(['"%s"' % item for item in batch]))
+ pbar.update(i)
+ pbar.finish()
+
+ useroptions = []
+ oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
+ __run('git %s commit -q -m "Commit of upstream changes at version %s" --allow-empty' % (' '.join(useroptions), newpv))
+ __run('git tag -f devtool-base-%s' % newpv)
+
+ (stdout, _) = __run('git rev-parse HEAD')
+ rev = stdout.rstrip()
+
+ if no_patch:
+ patches = oe.recipeutils.get_recipe_patches(crd)
+ if patches:
+ logger.warn('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
+ else:
+ __run('git checkout devtool-patched -b %s' % branch)
+ skiptag = False
+ try:
+ __run('git rebase %s' % rev)
+ except bb.process.ExecutionError as e:
+ skiptag = True
+ if 'conflict' in e.stdout:
+ logger.warn('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
+ else:
+ logger.warn('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+ if not skiptag:
+ if uri.startswith('git://'):
+ suffix = 'new'
+ else:
+ suffix = newpv
+ __run('git tag -f devtool-patched-%s' % suffix)
+
+ if tmpsrctree:
+ if keep_temp:
+ logger.info('Preserving temporary directory %s' % tmpsrctree)
+ else:
+ shutil.rmtree(tmpsrctree)
+
+ return (rev, md5, sha256, srcbranch, srcsubdir_rel)
+
+def _add_license_diff_to_recipe(path, diff):
+ notice_text = """# FIXME: the LIC_FILES_CHKSUM values have been updated by 'devtool upgrade'.
+# The following is the difference between the old and the new license text.
+# Please update the LICENSE value if needed, and summarize the changes in
+# the commit message via the 'License-Update:' tag.
+# (example: 'License-Update: copyright years updated.')
+#
+# The changes:
+#
+"""
+ commented_diff = "\n".join(["# {}".format(l) for l in diff.split('\n')])
+ with open(path, 'rb') as f:
+ orig_content = f.read()
+ with open(path, 'wb') as f:
+ f.write(notice_text.encode())
+ f.write(commented_diff.encode())
+ f.write("\n#\n\n".encode())
+ f.write(orig_content)
+
+def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses):
+ """Creates the new recipe under workspace"""
+
+ bpn = rd.getVar('BPN')
+ path = os.path.join(workspace, 'recipes', bpn)
+ bb.utils.mkdirhier(path)
+ copied, _ = oe.recipeutils.copy_recipe_files(rd, path, all_variants=True)
+ if not copied:
+ raise DevtoolError('Internal error - no files were copied for recipe %s' % bpn)
+ logger.debug('Copied %s to %s' % (copied, path))
+
+ oldpv = rd.getVar('PV')
+ if not newpv:
+ newpv = oldpv
+ origpath = rd.getVar('FILE')
+ fullpath = _rename_recipe_files(origpath, bpn, oldpv, newpv, path)
+ logger.debug('Upgraded %s => %s' % (origpath, fullpath))
+
+ newvalues = {}
+ if _recipe_contains(rd, 'PV') and newpv != oldpv:
+ newvalues['PV'] = newpv
+
+ if srcrev:
+ newvalues['SRCREV'] = srcrev
+
+ if srcbranch:
+ src_uri = oe.recipeutils.split_var_value(rd.getVar('SRC_URI', False) or '')
+ changed = False
+ replacing = True
+ new_src_uri = []
+ for entry in src_uri:
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ if replacing and scheme in ['git', 'gitsm']:
+ branch = params.get('branch', 'master')
+ if rd.expand(branch) != srcbranch:
+ # Handle case where branch is set through a variable
+ res = re.match(r'\$\{([^}@]+)\}', branch)
+ if res:
+ newvalues[res.group(1)] = srcbranch
+ # We know we won't change SRC_URI now, so break out
+ break
+ else:
+ params['branch'] = srcbranch
+ entry = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
+ changed = True
+ replacing = False
+ new_src_uri.append(entry)
+ if changed:
+ newvalues['SRC_URI'] = ' '.join(new_src_uri)
+
+ newvalues['PR'] = None
+
+ # Work out which SRC_URI entries have changed in case the entry uses a name
+ crd = rd.createCopy()
+ crd.setVar('PV', newpv)
+ for var, value in newvalues.items():
+ crd.setVar(var, value)
+ old_src_uri = (rd.getVar('SRC_URI') or '').split()
+ new_src_uri = (crd.getVar('SRC_URI') or '').split()
+ newnames = []
+ addnames = []
+ for newentry in new_src_uri:
+ _, _, _, _, _, params = bb.fetch2.decodeurl(newentry)
+ if 'name' in params:
+ newnames.append(params['name'])
+ if newentry not in old_src_uri:
+ addnames.append(params['name'])
+ # Find what's been set in the original recipe
+ oldnames = []
+ noname = False
+ for varflag in rd.getVarFlags('SRC_URI'):
+ if varflag.endswith(('.md5sum', '.sha256sum')):
+ name = varflag.rsplit('.', 1)[0]
+ if name not in oldnames:
+ oldnames.append(name)
+ elif varflag in ['md5sum', 'sha256sum']:
+ noname = True
+ # Even if SRC_URI has named entries it doesn't have to actually use the name
+ if noname and addnames and addnames[0] not in oldnames:
+ addnames = []
+ # Drop any old names (the name actually might include ${PV})
+ for name in oldnames:
+ if name not in newnames:
+ newvalues['SRC_URI[%s.md5sum]' % name] = None
+ newvalues['SRC_URI[%s.sha256sum]' % name] = None
+
+ if md5 and sha256:
+ if addnames:
+ nameprefix = '%s.' % addnames[0]
+ else:
+ nameprefix = ''
+ newvalues['SRC_URI[%smd5sum]' % nameprefix] = md5
+ newvalues['SRC_URI[%ssha256sum]' % nameprefix] = sha256
+
+ if srcsubdir_new != srcsubdir_old:
+ s_subdir_old = os.path.relpath(os.path.abspath(rd.getVar('S')), rd.getVar('WORKDIR'))
+ s_subdir_new = os.path.relpath(os.path.abspath(crd.getVar('S')), crd.getVar('WORKDIR'))
+ if srcsubdir_old == s_subdir_old and srcsubdir_new != s_subdir_new:
+ # Subdir for old extracted source matches what S points to (it should!)
+ # but subdir for new extracted source doesn't match what S will be
+ newvalues['S'] = '${WORKDIR}/%s' % srcsubdir_new.replace(newpv, '${PV}')
+ if crd.expand(newvalues['S']) == crd.expand('${WORKDIR}/${BP}'):
+ # It's the default, drop it
+ # FIXME what if S is being set in a .inc?
+ newvalues['S'] = None
+ logger.info('Source subdirectory has changed, dropping S value since it now matches the default ("${WORKDIR}/${BP}")')
+ else:
+ logger.info('Source subdirectory has changed, updating S value')
+
+ if license_diff:
+ newlicchksum = " ".join(["file://{};md5={}".format(l["path"], l["actual_md5"]) + (";beginline={}".format(l["beginline"]) if l["beginline"] else "") + (";endline={}".format(l["endline"]) if l["endline"] else "") for l in new_licenses])
+ newvalues["LIC_FILES_CHKSUM"] = newlicchksum
+ _add_license_diff_to_recipe(fullpath, license_diff)
+
+ rd = tinfoil.parse_recipe_file(fullpath, False)
+ oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
+
+ return fullpath, copied
+
+
+def _check_git_config():
+ def getconfig(name):
+ try:
+ value = bb.process.run('git config --global %s' % name)[0].strip()
+ except bb.process.ExecutionError as e:
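+ # git config exits with status 1 if the requested option is unset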
+ if e.exitcode == 1:
+ value = None
+ else:
+ raise
+ return value
+
+ username = getconfig('user.name')
+ useremail = getconfig('user.email')
+ configerr = []
+ if not username:
+ configerr.append('Please set your name using:\n git config --global user.name')
+ if not useremail:
+ configerr.append('Please set your email using:\n git config --global user.email')
+ if configerr:
+ raise DevtoolError('Your git configuration is incomplete which will prevent rebases from working:\n' + '\n'.join(configerr))
+
+def _extract_licenses(srcpath, recipe_licenses):
+ licenses = []
+ for url in recipe_licenses.split():
+ license = {}
+ (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
+ license['path'] = path
+ license['md5'] = parm.get('md5', '')
+ license['beginline'], license['endline'] = 0, 0
+ if 'beginline' in parm:
+ license['beginline'] = int(parm['beginline'])
+ if 'endline' in parm:
+ license['endline'] = int(parm['endline'])
+ license['text'] = []
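+ # Collect the license text between beginline and endline (where set)
+ # and compute its md5 for comparison against the checksum recorded
+ # in the recipe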
+ with open(os.path.join(srcpath, path), 'rb') as f:
+ import hashlib
+ actual_md5 = hashlib.md5()
+ lineno = 0
+ for line in f:
+ lineno += 1
+ if (lineno >= license['beginline']) and ((lineno <= license['endline']) or not license['endline']):
+ license['text'].append(line.decode(errors='ignore'))
+ actual_md5.update(line)
+ license['actual_md5'] = actual_md5.hexdigest()
+ licenses.append(license)
+ return licenses
+
+def _generate_license_diff(old_licenses, new_licenses):
+ need_diff = False
+ for l in new_licenses:
+ if l['md5'] != l['actual_md5']:
+ need_diff = True
+ break
+ if not need_diff:
+ return None
+
+ import difflib
+ diff = ''
+ for old, new in zip(old_licenses, new_licenses):
+ for line in difflib.unified_diff(old['text'], new['text'], old['path'], new['path']):
+ diff = diff + line
+ return diff
+
+def upgrade(args, config, basepath, workspace):
+ """Entry point for the devtool 'upgrade' subcommand"""
+
+ if args.recipename in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" % args.recipename)
+ if args.srcbranch and not args.srcrev:
+ raise DevtoolError("If you specify --srcbranch/-B then you must use --srcrev/-S to specify the revision" % args.recipename)
+
+ _check_git_config()
+
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+
+ pn = rd.getVar('PN')
+ if pn != args.recipename:
+ logger.info('Mapping %s to %s' % (args.recipename, pn))
+ if pn in workspace:
+ raise DevtoolError("recipe %s is already in your workspace" % pn)
+
+ if args.srctree:
+ srctree = os.path.abspath(args.srctree)
+ else:
+ srctree = standard.get_default_srctree(config, pn)
+
+ # try to automatically discover latest version and revision if not provided on command line
+ if not args.version and not args.srcrev:
+ version_info = oe.recipeutils.get_recipe_upstream_version(rd)
+ if version_info['version'] and not version_info['version'].endswith("new-commits-available"):
+ args.version = version_info['version']
+ if version_info['revision']:
+ args.srcrev = version_info['revision']
+ if not args.version and not args.srcrev:
+ raise DevtoolError("Automatic discovery of latest version/revision failed - you must provide a version using the --version/-V option, or for recipes that fetch from an SCM such as git, the --srcrev/-S option.")
+
+ standard._check_compatible_recipe(pn, rd)
+ old_srcrev = rd.getVar('SRCREV')
+ if old_srcrev == 'INVALID':
+ old_srcrev = None
+ if old_srcrev and not args.srcrev:
+ raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
+ old_ver = rd.getVar('PV')
+ if old_ver == args.version and old_srcrev == args.srcrev:
+ raise DevtoolError("Current and upgrade versions are the same version")
+ if args.version:
+ if bb.utils.vercmp_string(args.version, old_ver) < 0:
+ logger.warning('Upgrade version %s compares as less than the current version %s. If you are using a package feed for on-target upgrades or providing this recipe for general consumption, then you should increment PE in the recipe (or if there is no current PE value set, set it to "1")' % (args.version, old_ver))
+ check_prerelease_version(args.version, 'devtool upgrade')
+
+ rf = None
+ license_diff = None
+ try:
+ logger.info('Extracting current version source...')
+ rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
+ old_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ logger.info('Extracting upgraded version source...')
+ rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
+ args.srcrev, args.srcbranch, args.branch, args.keep_temp,
+ tinfoil, rd)
+ new_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ license_diff = _generate_license_diff(old_licenses, new_licenses)
+ rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses)
+ except bb.process.CmdError as e:
+ _upgrade_error(e, rf, srctree)
+ except DevtoolError as e:
+ _upgrade_error(e, rf, srctree)
+ standard._add_md5(config, pn, os.path.dirname(rf))
+
+ af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
+ copied, config.workspace_path, rd)
+ standard._add_md5(config, pn, af)
+
+ update_unlockedsigs(basepath, workspace, args.fixed_setup, [pn])
+
+ logger.info('Upgraded source extracted to %s' % srctree)
+ logger.info('New recipe is %s' % rf)
+ if license_diff:
+ logger.info('License checksums have been updated in the new recipe; please refer to it for the difference between the old and the new license texts.')
+ finally:
+ tinfoil.shutdown()
+ return 0
+
+def latest_version(args, config, basepath, workspace):
+ """Entry point for the devtool 'latest_version' subcommand"""
+ tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ return 1
+ version_info = oe.recipeutils.get_recipe_upstream_version(rd)
+ # "new-commits-available" is an indication that upstream never issues version tags
+ if not version_info['version'].endswith("new-commits-available"):
+ logger.info("Current version: {}".format(version_info['current_version']))
+ logger.info("Latest version: {}".format(version_info['version']))
+ if version_info['revision']:
+ logger.info("Latest version's commit: {}".format(version_info['revision']))
+ else:
+ logger.info("Latest commit: {}".format(version_info['revision']))
+ finally:
+ tinfoil.shutdown()
+ return 0
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+
+ defsrctree = standard.get_default_srctree(context.config)
+
+ parser_upgrade = subparsers.add_parser('upgrade', help='Upgrade an existing recipe',
+ description='Upgrades an existing recipe to a new upstream version. Puts the upgraded recipe file into the workspace along with any associated files, and extracts the source tree to a specified location (in case patches need rebasing or adding to as a result of the upgrade).',
+ group='starting')
+ parser_upgrade.add_argument('recipename', help='Name of recipe to upgrade (just name - no version, path or extension)')
+ parser_upgrade.add_argument('srctree', nargs='?', help='Path to where to extract the source tree. If not specified, a subdirectory of %s will be used.' % defsrctree)
+ parser_upgrade.add_argument('--version', '-V', help='Version to upgrade to (PV). If omitted, latest upstream version will be determined and used, if possible.')
+ parser_upgrade.add_argument('--srcrev', '-S', help='Source revision to upgrade to (useful when fetching from an SCM such as git)')
+ parser_upgrade.add_argument('--srcbranch', '-B', help='Branch in source repository containing the revision to use (if fetching from an SCM such as git)')
+ parser_upgrade.add_argument('--branch', '-b', default="devtool", help='Name for new development branch to checkout (default "%(default)s")')
+ parser_upgrade.add_argument('--no-patch', action="store_true", help='Do not apply patches from the recipe to the new source code')
+ parser_upgrade.add_argument('--no-overrides', '-O', action="store_true", help='Do not create branches for other override configurations')
+ group = parser_upgrade.add_mutually_exclusive_group()
+ group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
+ group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
+ parser_upgrade.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_upgrade.set_defaults(func=upgrade, fixed_setup=context.fixed_setup)
+
+ parser_latest_version = subparsers.add_parser('latest-version', help='Report the latest version of an existing recipe',
+ description='Queries the upstream server for what the latest upstream release is (for git, tags are checked, for tarballs, a list of them is obtained, and one with the highest version number is reported)',
+ group='info')
+ parser_latest_version.add_argument('recipename', help='Name of recipe to query (just name - no version, path or extension)')
+ parser_latest_version.set_defaults(func=latest_version)
diff --git a/poky/scripts/lib/devtool/utilcmds.py b/poky/scripts/lib/devtool/utilcmds.py
new file mode 100644
index 000000000..7cd139fb8
--- /dev/null
+++ b/poky/scripts/lib/devtool/utilcmds.py
@@ -0,0 +1,252 @@
+# Development tool - utility commands plugin
+#
+# Copyright (C) 2015-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool utility plugins"""
+
+import os
+import sys
+import shutil
+import tempfile
+import logging
+import argparse
+import subprocess
+import bb
+import scriptutils
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import parse_recipe
+
+logger = logging.getLogger('devtool')
+
+def _find_recipe_path(args, config, basepath, workspace):
+ if args.any_recipe:
+ logger.warning('-a/--any-recipe option is now always active and will be removed in a future release')
+ if args.recipename in workspace:
+ recipefile = workspace[args.recipename]['recipefile']
+ else:
+ recipefile = None
+ if not recipefile:
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, True)
+ if not rd:
+ raise DevtoolError("Failed to find specified recipe")
+ recipefile = rd.getVar('FILE')
+ finally:
+ tinfoil.shutdown()
+ return recipefile
+
+
+def find_recipe(args, config, basepath, workspace):
+ """Entry point for the devtool 'find-recipe' subcommand"""
+ recipefile = _find_recipe_path(args, config, basepath, workspace)
+ print(recipefile)
+ return 0
+
+
+def edit_recipe(args, config, basepath, workspace):
+ """Entry point for the devtool 'edit-recipe' subcommand"""
+ return scriptutils.run_editor(_find_recipe_path(args, config, basepath, workspace), logger)
+
+
+def configure_help(args, config, basepath, workspace):
+ """Entry point for the devtool 'configure-help' subcommand"""
+ import oe.utils
+
+ check_workspace_recipe(workspace, args.recipename)
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+ b = rd.getVar('B')
+ s = rd.getVar('S')
+ configurescript = os.path.join(s, 'configure')
+ confdisabled = 'noexec' in rd.getVarFlags('do_configure') or 'do_configure' not in (rd.getVar('__BBTASKS', False) or [])
+ configureopts = oe.utils.squashspaces(rd.getVar('CONFIGUREOPTS') or '')
+ extra_oeconf = oe.utils.squashspaces(rd.getVar('EXTRA_OECONF') or '')
+ extra_oecmake = oe.utils.squashspaces(rd.getVar('EXTRA_OECMAKE') or '')
+ do_configure = rd.getVar('do_configure') or ''
+ do_configure_noexpand = rd.getVar('do_configure', False) or ''
+ packageconfig = rd.getVarFlags('PACKAGECONFIG') or []
+ autotools = bb.data.inherits_class('autotools', rd) and ('oe_runconf' in do_configure or 'autotools_do_configure' in do_configure)
+ cmake = bb.data.inherits_class('cmake', rd) and ('cmake_do_configure' in do_configure)
+ cmake_do_configure = rd.getVar('cmake_do_configure')
+ pn = rd.getVar('PN')
+ finally:
+ tinfoil.shutdown()
+
+ if 'doc' in packageconfig:
+ del packageconfig['doc']
+
+ if autotools and not os.path.exists(configurescript):
+ logger.info('Running do_configure to generate configure script')
+ try:
+ stdout, _ = exec_build_env_command(config.init_path, basepath,
+ 'bitbake -c configure %s' % args.recipename,
+ stderr=subprocess.STDOUT)
+ except bb.process.ExecutionError:
+ pass
+
+ if confdisabled or do_configure.strip() in ('', ':'):
+ raise DevtoolError("do_configure task has been disabled for this recipe")
+ elif args.no_pager and not os.path.exists(configurescript):
+ raise DevtoolError("No configure script found and no other information to display")
+ else:
+ configopttext = ''
+ if autotools and configureopts:
+ configopttext = '''
+Arguments currently passed to the configure script:
+
+%s
+
+Some of those are fixed.''' % (configureopts + ' ' + extra_oeconf)
+ if extra_oeconf:
+ configopttext += ''' The ones that are specified through EXTRA_OECONF (which you can change or add to easily):
+
+%s''' % extra_oeconf
+
+ elif cmake:
+ in_cmake = False
+ cmake_cmd = ''
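+ # Reconstruct the cmake command line from cmake_do_configure,
+ # joining any backslash-continued lines into a single command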
+ for line in cmake_do_configure.splitlines():
+ if in_cmake:
+ cmake_cmd = cmake_cmd + ' ' + line.strip().rstrip('\\')
+ if not line.endswith('\\'):
+ break
+ if line.lstrip().startswith('cmake '):
+ cmake_cmd = line.strip().rstrip('\\')
+ if line.endswith('\\'):
+ in_cmake = True
+ else:
+ break
+ if cmake_cmd:
+ configopttext = '''
+The current cmake command line:
+
+%s
+
+Arguments specified through EXTRA_OECMAKE (which you can change or add to easily):
+
+%s''' % (oe.utils.squashspaces(cmake_cmd), extra_oecmake)
+ else:
+ configopttext = '''
+The current implementation of cmake_do_configure:
+
+cmake_do_configure() {
+%s
+}
+
+Arguments specified through EXTRA_OECMAKE (which you can change or add to easily):
+
+%s''' % (cmake_do_configure.rstrip(), extra_oecmake)
+
+ elif do_configure:
+ configopttext = '''
+The current implementation of do_configure:
+
+do_configure() {
+%s
+}''' % do_configure.rstrip()
+ if '${EXTRA_OECONF}' in do_configure_noexpand:
+ configopttext += '''
+
+Arguments specified through EXTRA_OECONF (which you can change or add to easily):
+
+%s''' % extra_oeconf
+
+ if packageconfig:
+ configopttext += '''
+
+Some of these options may be controlled through PACKAGECONFIG; for more details please see the recipe.'''
+
+ if args.arg:
+ helpargs = ' '.join(args.arg)
+ elif cmake:
+ helpargs = '-LH'
+ else:
+ helpargs = '--help'
+
+ msg = '''configure information for %s
+------------------------------------------
+%s''' % (pn, configopttext)
+
+ if cmake:
+ msg += '''
+
+The cmake %s output for %s follows. After "-- Cache values" you should see a list of variables you can add to EXTRA_OECMAKE (prefixed with -D and suffixed with = followed by the desired value, without any spaces).
+------------------------------------------''' % (helpargs, pn)
+ elif os.path.exists(configurescript):
+ msg += '''
+
+The ./configure %s output for %s follows.
+------------------------------------------''' % (helpargs, pn)
+
+ olddir = os.getcwd()
+ tmppath = tempfile.mkdtemp()
+ with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+ if not args.no_header:
+ tf.write(msg + '\n')
+ tf.close()
+ try:
+ try:
+ cmd = 'cat %s' % tf.name
+ if cmake:
+ cmd += '; cmake %s %s 2>&1' % (helpargs, s)
+ os.chdir(b)
+ elif os.path.exists(configurescript):
+ cmd += '; %s %s' % (configurescript, helpargs)
+ if sys.stdout.isatty() and not args.no_pager:
+ pager = os.environ.get('PAGER', 'less')
+ cmd = '(%s) | %s' % (cmd, pager)
+ subprocess.check_call(cmd, shell=True)
+ except subprocess.CalledProcessError as e:
+ return e.returncode
+ finally:
+ os.chdir(olddir)
+ shutil.rmtree(tmppath)
+ os.remove(tf.name)
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+ parser_edit_recipe = subparsers.add_parser('edit-recipe', help='Edit a recipe file',
+ description='Runs the default editor (as specified by the EDITOR variable) on the specified recipe. Note that this will be quicker for recipes in the workspace as the cache does not need to be loaded in that case.',
+ group='working')
+ parser_edit_recipe.add_argument('recipename', help='Recipe to edit')
+ # FIXME drop -a at some point in future
+ parser_edit_recipe.add_argument('--any-recipe', '-a', action="store_true", help='Does nothing (exists for backwards-compatibility)')
+ parser_edit_recipe.set_defaults(func=edit_recipe)
+
+ # Find-recipe
+ parser_find_recipe = subparsers.add_parser('find-recipe', help='Find a recipe file',
+ description='Finds a recipe file. Note that this will be quicker for recipes in the workspace as the cache does not need to be loaded in that case.',
+ group='working')
+ parser_find_recipe.add_argument('recipename', help='Recipe to find')
+ # FIXME drop -a at some point in future
+ parser_find_recipe.add_argument('--any-recipe', '-a', action="store_true", help='Does nothing (exists for backwards-compatibility)')
+ parser_find_recipe.set_defaults(func=find_recipe)
+
+ # NOTE: Needed to override the usage string here since the default
+ # gets the order wrong - recipename must come before --arg
+ parser_configure_help = subparsers.add_parser('configure-help', help='Get help on configure script options',
+ usage='devtool configure-help [options] recipename [--arg ...]',
+ description='Displays the help for the configure script for the specified recipe (i.e. runs ./configure --help) prefaced by a header describing the current options being specified. Output is piped through less (or whatever PAGER is set to, if set) for easy browsing.',
+ group='working')
+ parser_configure_help.add_argument('recipename', help='Recipe to show configure help for')
+ parser_configure_help.add_argument('-p', '--no-pager', help='Disable paged output', action="store_true")
+ parser_configure_help.add_argument('-n', '--no-header', help='Disable explanatory header text', action="store_true")
+ parser_configure_help.add_argument('--arg', help='Pass remaining arguments to the configure script instead of --help (useful if the script has additional help options)', nargs=argparse.REMAINDER)
+ parser_configure_help.set_defaults(func=configure_help)
diff --git a/poky/scripts/lib/recipetool/__init__.py b/poky/scripts/lib/recipetool/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/scripts/lib/recipetool/__init__.py
diff --git a/poky/scripts/lib/recipetool/append.py b/poky/scripts/lib/recipetool/append.py
new file mode 100644
index 000000000..69c8bb77a
--- /dev/null
+++ b/poky/scripts/lib/recipetool/append.py
@@ -0,0 +1,457 @@
+# Recipe creation tool - append plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import fnmatch
+import re
+import subprocess
+import logging
+import stat
+import shutil
+import scriptutils
+import errno
+from collections import defaultdict
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+# FIXME guessing when we don't have pkgdata?
+# FIXME mode to create patch rather than directly substitute
+
+class InvalidTargetFileError(Exception):
+ pass
+
+def find_target_file(targetpath, d, pkglist=None):
+ """Find the recipe installing the specified target path, optionally limited to a select list of packages"""
+ import json
+
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+
+ # The mix between /etc and ${sysconfdir} here may look odd, but it is just
+ # being consistent with usage elsewhere
+ invalidtargets = {'${sysconfdir}/version': '${sysconfdir}/version is written out at image creation time',
+ '/etc/timestamp': '/etc/timestamp is written out at image creation time',
+ '/dev/*': '/dev is handled by udev (or equivalent) and the kernel (devtmpfs)',
+ '/etc/passwd': '/etc/passwd should be managed through the useradd and extrausers classes',
+ '/etc/group': '/etc/group should be managed through the useradd and extrausers classes',
+ '/etc/shadow': '/etc/shadow should be managed through the useradd and extrausers classes',
+ '/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
+ '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname_pn-base-files = "value" in configuration',}
+
+ for pthspec, message in invalidtargets.items():
+ if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
+ raise InvalidTargetFileError(d.expand(message))
+
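+    # Matches the target path appearing as a whitespace-delimited token in a
+    # script, optionally prefixed with $D (the install root used in such scripts)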
+ targetpath_re = re.compile(r'\s+(\$D)?%s(\s|$)' % targetpath)
+
+ recipes = defaultdict(list)
+ for root, dirs, files in os.walk(os.path.join(pkgdata_dir, 'runtime')):
+ if pkglist:
+ filelist = pkglist
+ else:
+ filelist = files
+ for fn in filelist:
+ pkgdatafile = os.path.join(root, fn)
+ if pkglist and not os.path.exists(pkgdatafile):
+ continue
+ with open(pkgdatafile, 'r') as f:
+ pn = ''
+ # This does assume that PN comes before other values, but that's a fairly safe assumption
+ for line in f:
+ if line.startswith('PN:'):
+ pn = line.split(':', 1)[1].strip()
+ elif line.startswith('FILES_INFO:'):
+ val = line.split(':', 1)[1].strip()
+ dictval = json.loads(val)
+ for fullpth in dictval.keys():
+ if fnmatch.fnmatchcase(fullpth, targetpath):
+ recipes[targetpath].append(pn)
+ elif line.startswith('pkg_preinst_') or line.startswith('pkg_postinst_'):
+ scriptval = line.split(':', 1)[1].strip().encode('utf-8').decode('unicode_escape')
+ if 'update-alternatives --install %s ' % targetpath in scriptval:
+ recipes[targetpath].append('?%s' % pn)
+ elif targetpath_re.search(scriptval):
+ recipes[targetpath].append('!%s' % pn)
+ return recipes
+
+def _parse_recipe(pn, tinfoil):
+ try:
+ rd = tinfoil.parse_recipe(pn)
+ except bb.providers.NoProvider as e:
+ logger.error(str(e))
+ return None
+ return rd
+
+def determine_file_source(targetpath, rd):
+ """Assuming we know a file came from a specific recipe, figure out exactly where it came from"""
+ import oe.recipeutils
+
+ # See if it's in do_install for the recipe
+ workdir = rd.getVar('WORKDIR')
+ src_uri = rd.getVar('SRC_URI')
+ srcfile = ''
+ modpatches = []
+ elements = check_do_install(rd, targetpath)
+ if elements:
+ logger.debug('do_install line:\n%s' % ' '.join(elements))
+ srcpath = get_source_path(elements)
+ logger.debug('source path: %s' % srcpath)
+ if not srcpath.startswith('/'):
+ # Handle non-absolute path
+ srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs').split()[-1], srcpath))
+ if srcpath.startswith(workdir):
+ # OK, now we have the source file name, look for it in SRC_URI
+ workdirfile = os.path.relpath(srcpath, workdir)
+ # FIXME this is where we ought to have some code in the fetcher, because this is naive
+ for item in src_uri.split():
+ localpath = bb.fetch2.localpath(item, rd)
+ # Source path specified in do_install might be a glob
+ if fnmatch.fnmatch(os.path.basename(localpath), workdirfile):
+ srcfile = 'file://%s' % localpath
+ elif '/' in workdirfile:
+ if item == 'file://%s' % workdirfile:
+ srcfile = 'file://%s' % localpath
+
+ # Check patches
+ srcpatches = []
+ patchedfiles = oe.recipeutils.get_recipe_patched_files(rd)
+ for patch, filelist in patchedfiles.items():
+ for fileitem in filelist:
+ if fileitem[0] == srcpath:
+ srcpatches.append((patch, fileitem[1]))
+ if srcpatches:
+ addpatch = None
+ for patch in srcpatches:
+ if patch[1] == 'A':
+ addpatch = patch[0]
+ else:
+ modpatches.append(patch[0])
+ if addpatch:
+ srcfile = 'patch://%s' % addpatch
+
+ return (srcfile, elements, modpatches)
+
+def get_source_path(cmdelements):
+ """Find the source path specified within a command"""
+ command = cmdelements[0]
+ if command in ['install', 'cp']:
+ helptext = subprocess.check_output('LC_ALL=C %s --help' % command, shell=True).decode('utf-8')
+ argopts = ''
+ argopt_line_re = re.compile('^-([a-zA-Z0-9]), --[a-z-]+=')
+ for line in helptext.splitlines():
+ line = line.lstrip()
+ res = argopt_line_re.search(line)
+ if res:
+ argopts += res.group(1)
+ if not argopts:
+ # Fallback
+ if command == 'install':
+ argopts = 'gmoSt'
+ elif command == 'cp':
+ argopts = 't'
+ else:
+ raise Exception('No fallback arguments for command %s' % command)
+
+ skipnext = False
+ for elem in cmdelements[1:-1]:
+ if elem.startswith('-'):
+ if len(elem) > 1 and elem[1] in argopts:
+ skipnext = True
+ continue
+ if skipnext:
+ skipnext = False
+ continue
+ return elem
+ else:
+        raise Exception('get_source_path: no handling for command "%s"' % command)
+
+def get_func_deps(func, d):
+ """Find the function dependencies of a shell function"""
+ deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
+ deps |= set((d.getVarFlag(func, "vardeps") or "").split())
+ funcdeps = []
+ for dep in deps:
+ if d.getVarFlag(dep, 'func'):
+ funcdeps.append(dep)
+ return funcdeps
+
+def check_do_install(rd, targetpath):
+ """Look at do_install for a command that installs/copies the specified target path"""
+ instpath = os.path.abspath(os.path.join(rd.getVar('D'), targetpath.lstrip('/')))
+ do_install = rd.getVar('do_install')
+ # Handle where do_install calls other functions (somewhat crudely, but good enough for this purpose)
+ deps = get_func_deps('do_install', rd)
+ for dep in deps:
+ do_install = do_install.replace(dep, rd.getVar(dep))
+
+ # Look backwards through do_install as we want to catch where a later line (perhaps
+ # from a bbappend) is writing over the top
+ for line in reversed(do_install.splitlines()):
+ line = line.strip()
+ if (line.startswith('install ') and ' -m' in line) or line.startswith('cp '):
+ elements = line.split()
+ destpath = os.path.abspath(elements[-1])
+ if destpath == instpath:
+ return elements
+ elif destpath.rstrip('/') == os.path.dirname(instpath):
+ # FIXME this doesn't take recursive copy into account; unsure if it's practical to do so
+ srcpath = get_source_path(elements)
+ if fnmatch.fnmatchcase(os.path.basename(instpath), os.path.basename(srcpath)):
+ return elements
+ return None
+
+
+def appendfile(args):
+ import oe.recipeutils
+
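+    # Run file(1) on the replacement file to check whether it looks like a
+    # compiled binary, and warn if so, since substituting a built artifact
+    # directly is usually not what the user wants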
+ stdout = ''
+ try:
+ (stdout, _) = bb.process.run('LANG=C file -b %s' % args.newfile, shell=True)
+ if 'cannot open' in stdout:
+ raise bb.process.ExecutionError(stdout)
+ except bb.process.ExecutionError as err:
+ logger.debug('file command returned error: %s' % err)
+ stdout = ''
+ if stdout:
+ logger.debug('file command output: %s' % stdout.rstrip())
+ if ('executable' in stdout and not 'shell script' in stdout) or 'shared object' in stdout:
+ logger.warn('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')
+
+ if args.recipe:
+ recipes = {args.targetpath: [args.recipe],}
+ else:
+ try:
+ recipes = find_target_file(args.targetpath, tinfoil.config_data)
+ except InvalidTargetFileError as e:
+ logger.error('%s cannot be handled by this tool: %s' % (args.targetpath, e))
+ return 1
+ if not recipes:
+ logger.error('Unable to find any package producing path %s - this may be because the recipe packaging it has not been built yet' % args.targetpath)
+ return 1
+
+ alternative_pns = []
+ postinst_pns = []
+
+ selectpn = None
+ for targetpath, pnlist in recipes.items():
+ for pn in pnlist:
+ if pn.startswith('?'):
+ alternative_pns.append(pn[1:])
+ elif pn.startswith('!'):
+ postinst_pns.append(pn[1:])
+ elif selectpn:
+ # hit here with multilibs
+ continue
+ else:
+ selectpn = pn
+
+ if not selectpn and len(alternative_pns) == 1:
+ selectpn = alternative_pns[0]
+        logger.error('File %s is an alternative possibly provided by recipe %s and seemingly no other, selecting it by default - you should double-check other recipes' % (args.targetpath, selectpn))
+
+ if selectpn:
+ logger.debug('Selecting recipe %s for file %s' % (selectpn, args.targetpath))
+ if postinst_pns:
+            logger.warn('%s may be modified by postinstall scripts for the following recipes:\n %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n '.join(postinst_pns)))
+ rd = _parse_recipe(selectpn, tinfoil)
+ if not rd:
+ # Error message already shown
+ return 1
+ sourcefile, instelements, modpatches = determine_file_source(args.targetpath, rd)
+ sourcepath = None
+ if sourcefile:
+ sourcetype, sourcepath = sourcefile.split('://', 1)
+ logger.debug('Original source file is %s (%s)' % (sourcepath, sourcetype))
+ if sourcetype == 'patch':
+ logger.warn('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
+ sourcepath = None
+ else:
+ logger.debug('Unable to determine source file, proceeding anyway')
+ if modpatches:
+ logger.warn('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))
+
+ if instelements and sourcepath:
+ install = None
+ else:
+ # Auto-determine permissions
+ # Check destination
+ binpaths = '${bindir}:${sbindir}:${base_bindir}:${base_sbindir}:${libexecdir}:${sysconfdir}/init.d'
+ perms = '0644'
+ if os.path.abspath(os.path.dirname(args.targetpath)) in rd.expand(binpaths).split(':'):
+ # File is going into a directory normally reserved for executables, so it should be executable
+ perms = '0755'
+ else:
+ # Check source
+ st = os.stat(args.newfile)
+ if st.st_mode & stat.S_IXUSR:
+ perms = '0755'
+ install = {args.newfile: (args.targetpath, perms)}
+ oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: sourcepath}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ return 0
+ else:
+ if alternative_pns:
+ logger.error('File %s is an alternative possibly provided by the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(alternative_pns)))
+ elif postinst_pns:
+ logger.error('File %s may be written out in a pre/postinstall script of the following recipes:\n %s\nPlease select recipe with -r/--recipe' % (targetpath, '\n '.join(postinst_pns)))
+ return 3
+
+
+def appendsrc(args, files, rd, extralines=None):
+ import oe.recipeutils
+
+ srcdir = rd.getVar('S')
+ workdir = rd.getVar('WORKDIR')
+
+ import bb.fetch
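+    # Build a mapping from each SRC_URI entry with its parameters stripped back
+    # to its original form, so that we can detect below whether a file is
+    # already present (possibly with different parameters)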
+ simplified = {}
+ src_uri = rd.getVar('SRC_URI').split()
+ for uri in src_uri:
+ if uri.endswith(';'):
+ uri = uri[:-1]
+ simple_uri = bb.fetch.URI(uri)
+ simple_uri.params = {}
+ simplified[str(simple_uri)] = uri
+
+ copyfiles = {}
+ extralines = extralines or []
+ for newfile, srcfile in files.items():
+ src_destdir = os.path.dirname(srcfile)
+ if not args.use_workdir:
+ if rd.getVar('S') == rd.getVar('STAGING_KERNEL_DIR'):
+ srcdir = os.path.join(workdir, 'git')
+ if not bb.data.inherits_class('kernel-yocto', rd):
+ logger.warn('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
+ src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
+ src_destdir = os.path.normpath(src_destdir)
+
+ source_uri = 'file://{0}'.format(os.path.basename(srcfile))
+ if src_destdir and src_destdir != '.':
+ source_uri += ';subdir={0}'.format(src_destdir)
+
+ simple = bb.fetch.URI(source_uri)
+ simple.params = {}
+ simple_str = str(simple)
+ if simple_str in simplified:
+ existing = simplified[simple_str]
+ if source_uri != existing:
+ logger.warn('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
+ else:
+ logger.warn('{0!r} is already in SRC_URI, not adding'.format(source_uri))
+ else:
+ extralines.append('SRC_URI += {0}'.format(source_uri))
+ copyfiles[newfile] = srcfile
+
+ oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines)
+
+
+def appendsrcfiles(parser, args):
+ recipedata = _parse_recipe(args.recipe, tinfoil)
+ if not recipedata:
+ parser.error('RECIPE must be a valid recipe name')
+
+ files = dict((f, os.path.join(args.destdir, os.path.basename(f)))
+ for f in args.files)
+ return appendsrc(args, files, recipedata)
+
+
+def appendsrcfile(parser, args):
+ recipedata = _parse_recipe(args.recipe, tinfoil)
+ if not recipedata:
+ parser.error('RECIPE must be a valid recipe name')
+
+ if not args.destfile:
+ args.destfile = os.path.basename(args.file)
+ elif args.destfile.endswith('/'):
+ args.destfile = os.path.join(args.destfile, os.path.basename(args.file))
+
+ return appendsrc(args, {args.file: args.destfile}, recipedata)
+
+
+def layer(layerpath):
+ if not os.path.exists(os.path.join(layerpath, 'conf', 'layer.conf')):
+ raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
+ return layerpath
+
+
+def existing_path(filepath):
+ if not os.path.exists(filepath):
+ raise argparse.ArgumentTypeError('{0!r} must be an existing path'.format(filepath))
+ return filepath
+
+
+def existing_file(filepath):
+ filepath = existing_path(filepath)
+ if os.path.isdir(filepath):
+ raise argparse.ArgumentTypeError('{0!r} must be a file, not a directory'.format(filepath))
+ return filepath
+
+
+def destination_path(destpath):
+ if os.path.isabs(destpath):
+ raise argparse.ArgumentTypeError('{0!r} must be a relative path, not absolute'.format(destpath))
+ return destpath
+
+
+def target_path(targetpath):
+ if not os.path.isabs(targetpath):
+ raise argparse.ArgumentTypeError('{0!r} must be an absolute path, not relative'.format(targetpath))
+ return targetpath
+
+
+def register_commands(subparsers):
+ common = argparse.ArgumentParser(add_help=False)
+ common.add_argument('-m', '--machine', help='Make bbappend changes specific to a machine only', metavar='MACHINE')
+ common.add_argument('-w', '--wildcard-version', help='Use wildcard to make the bbappend apply to any recipe version', action='store_true')
+ common.add_argument('destlayer', metavar='DESTLAYER', help='Base directory of the destination layer to write the bbappend to', type=layer)
+
+ parser_appendfile = subparsers.add_parser('appendfile',
+ parents=[common],
+ help='Create/update a bbappend to replace a target file',
+ description='Creates a bbappend (or updates an existing one) to replace the specified file that appears in the target system, determining the recipe that packages the file and the required path and name for the bbappend automatically. Note that the ability to determine the recipe packaging a particular file depends upon the recipe\'s do_packagedata task having already run prior to running this command (which it will have when the recipe has been built successfully, which in turn will have happened if one or more of the recipe\'s packages is included in an image that has been built successfully).')
+ parser_appendfile.add_argument('targetpath', help='Path to the file to be replaced (as it would appear within the target image, e.g. /etc/motd)', type=target_path)
+ parser_appendfile.add_argument('newfile', help='Custom file to replace the target file with', type=existing_file)
+ parser_appendfile.add_argument('-r', '--recipe', help='Override recipe to apply to (default is to find which recipe already packages the file)')
+ parser_appendfile.set_defaults(func=appendfile, parserecipes=True)
+
+ common_src = argparse.ArgumentParser(add_help=False, parents=[common])
+ common_src.add_argument('-W', '--workdir', help='Unpack file into WORKDIR rather than S', dest='use_workdir', action='store_true')
+ common_src.add_argument('recipe', metavar='RECIPE', help='Override recipe to apply to')
+
+ parser = subparsers.add_parser('appendsrcfiles',
+ parents=[common_src],
+ help='Create/update a bbappend to add or replace source files',
+                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified files in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify multiple files with a destination directory, so you cannot specify the destination filename. See the `appendsrcfile` command for the other behavior.')
+ parser.add_argument('-D', '--destdir', help='Destination directory (relative to S or WORKDIR, defaults to ".")', default='', type=destination_path)
+ parser.add_argument('files', nargs='+', metavar='FILE', help='File(s) to be added to the recipe sources (WORKDIR or S)', type=existing_path)
+ parser.set_defaults(func=lambda a: appendsrcfiles(parser, a), parserecipes=True)
+
+ parser = subparsers.add_parser('appendsrcfile',
+ parents=[common_src],
+ help='Create/update a bbappend to add or replace a source file',
+                                   description='Creates a bbappend (or updates an existing one) to add or replace the specified file in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify the destination filename, not just the destination directory, but only works for one file. See the `appendsrcfiles` command for the other behavior.')
+ parser.add_argument('file', metavar='FILE', help='File to be added to the recipe sources (WORKDIR or S)', type=existing_path)
+ parser.add_argument('destfile', metavar='DESTFILE', nargs='?', help='Destination path (relative to S or WORKDIR, optional)', type=destination_path)
+ parser.set_defaults(func=lambda a: appendsrcfile(parser, a), parserecipes=True)
diff --git a/poky/scripts/lib/recipetool/create.py b/poky/scripts/lib/recipetool/create.py
new file mode 100644
index 000000000..a3710285b
--- /dev/null
+++ b/poky/scripts/lib/recipetool/create.py
@@ -0,0 +1,1329 @@
+# Recipe creation tool - create command plugin
+#
+# Copyright (C) 2014-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import fnmatch
+import re
+import json
+import logging
+import scriptutils
+from urllib.parse import urlparse, urldefrag, urlsplit
+import hashlib
+import bb.fetch2
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+plugins = None
+
+def log_error_cond(message, debugonly):
+ if debugonly:
+ logger.debug(message)
+ else:
+ logger.error(message)
+
+def log_info_cond(message, debugonly):
+ if debugonly:
+ logger.debug(message)
+ else:
+ logger.info(message)
+
+def plugin_init(pluginlist):
+ # Take a reference to the list so we can use it later
+ global plugins
+ plugins = pluginlist
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+class RecipeHandler(object):
+ recipelibmap = {}
+ recipeheadermap = {}
+ recipecmakefilemap = {}
+ recipebinmap = {}
+
+ def __init__(self):
+ self._devtool = False
+
+ @staticmethod
+ def load_libmap(d):
+ '''Load library->recipe mapping'''
+ import oe.package
+
+ if RecipeHandler.recipelibmap:
+ return
+ # First build up library->package mapping
+ shlib_providers = oe.package.read_shlib_providers(d)
+ libdir = d.getVar('libdir')
+ base_libdir = d.getVar('base_libdir')
+ libpaths = list(set([base_libdir, libdir]))
+ libname_re = re.compile('^lib(.+)\.so.*$')
+ pkglibmap = {}
+ for lib, item in shlib_providers.items():
+ for path, pkg in item.items():
+ if path in libpaths:
+ res = libname_re.match(lib)
+ if res:
+ libname = res.group(1)
+ if not libname in pkglibmap:
+ pkglibmap[libname] = pkg[0]
+ else:
+ logger.debug('unable to extract library name from %s' % lib)
+
+ # Now turn it into a library->recipe mapping
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+ for libname, pkg in pkglibmap.items():
+ try:
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
+ for line in f:
+ if line.startswith('PN:'):
+ RecipeHandler.recipelibmap[libname] = line.split(':', 1)[-1].strip()
+ break
+ except IOError as ioe:
+ if ioe.errno == 2:
+ logger.warn('unable to find a pkgdata file for package %s' % pkg)
+ else:
+ raise
+
+        # Some overrides - these should be mapped to their virtual providers
+ RecipeHandler.recipelibmap['GL'] = 'virtual/libgl'
+ RecipeHandler.recipelibmap['EGL'] = 'virtual/egl'
+ RecipeHandler.recipelibmap['GLESv2'] = 'virtual/libgles2'
+
+ @staticmethod
+ def load_devel_filemap(d):
+ '''Build up development file->recipe mapping'''
+ if RecipeHandler.recipeheadermap:
+ return
+ pkgdata_dir = d.getVar('PKGDATA_DIR')
+ includedir = d.getVar('includedir')
+ cmakedir = os.path.join(d.getVar('libdir'), 'cmake')
+ for pkg in glob.glob(os.path.join(pkgdata_dir, 'runtime', '*-dev')):
+ with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
+ pn = None
+ headers = []
+ cmakefiles = []
+ for line in f:
+ if line.startswith('PN:'):
+ pn = line.split(':', 1)[-1].strip()
+ elif line.startswith('FILES_INFO:'):
+ val = line.split(':', 1)[1].strip()
+ dictval = json.loads(val)
+ for fullpth in sorted(dictval):
+ if fullpth.startswith(includedir) and fullpth.endswith('.h'):
+ headers.append(os.path.relpath(fullpth, includedir))
+ elif fullpth.startswith(cmakedir) and fullpth.endswith('.cmake'):
+ cmakefiles.append(os.path.relpath(fullpth, cmakedir))
+ if pn and headers:
+ for header in headers:
+ RecipeHandler.recipeheadermap[header] = pn
+ if pn and cmakefiles:
+ for fn in cmakefiles:
+ RecipeHandler.recipecmakefilemap[fn] = pn
+
+ @staticmethod
+ def load_binmap(d):
+ '''Build up native binary->recipe mapping'''
+ if RecipeHandler.recipebinmap:
+ return
+ sstate_manifests = d.getVar('SSTATE_MANIFESTS')
+ staging_bindir_native = d.getVar('STAGING_BINDIR_NATIVE')
+ build_arch = d.getVar('BUILD_ARCH')
+ fileprefix = 'manifest-%s-' % build_arch
+ for fn in glob.glob(os.path.join(sstate_manifests, '%s*-native.populate_sysroot' % fileprefix)):
+ with open(fn, 'r') as f:
+ pn = os.path.basename(fn).rsplit('.', 1)[0][len(fileprefix):]
+ for line in f:
+ if line.startswith(staging_bindir_native):
+ prog = os.path.basename(line.rstrip())
+ RecipeHandler.recipebinmap[prog] = pn
+
+ @staticmethod
+ def checkfiles(path, speclist, recursive=False, excludedirs=None):
+ results = []
+ if recursive:
+ for root, dirs, files in os.walk(path, topdown=True):
+ if excludedirs:
+ dirs[:] = [d for d in dirs if d not in excludedirs]
+ for fn in files:
+ for spec in speclist:
+ if fnmatch.fnmatch(fn, spec):
+ results.append(os.path.join(root, fn))
+ else:
+ for spec in speclist:
+ results.extend(glob.glob(os.path.join(path, spec)))
+ return results
+
+ @staticmethod
+ def handle_depends(libdeps, pcdeps, deps, outlines, values, d):
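+        # Map the gathered pkg-config (pcdeps) and library (libdeps)
+        # dependencies onto recipe names using pkgdata, appending NOTE comments
+        # to outlines for anything that cannot be mapped and setting DEPENDS
+        # (via values) from the rest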
+ if pcdeps:
+ recipemap = read_pkgconfig_provides(d)
+ if libdeps:
+ RecipeHandler.load_libmap(d)
+
+ ignorelibs = ['socket']
+ ignoredeps = ['gcc-runtime', 'glibc', 'uclibc', 'musl', 'tar-native', 'binutils-native', 'coreutils-native']
+
+ unmappedpc = []
+ pcdeps = list(set(pcdeps))
+ for pcdep in pcdeps:
+ if isinstance(pcdep, str):
+ recipe = recipemap.get(pcdep, None)
+ if recipe:
+ deps.append(recipe)
+ else:
+ if not pcdep.startswith('$'):
+ unmappedpc.append(pcdep)
+ else:
+ for item in pcdep:
+                    recipe = recipemap.get(item, None)
+ if recipe:
+ deps.append(recipe)
+ break
+ else:
+ unmappedpc.append('(%s)' % ' or '.join(pcdep))
+
+ unmappedlibs = []
+ for libdep in libdeps:
+ if isinstance(libdep, tuple):
+ lib, header = libdep
+ else:
+ lib = libdep
+ header = None
+
+ if lib in ignorelibs:
+ logger.debug('Ignoring library dependency %s' % lib)
+ continue
+
+ recipe = RecipeHandler.recipelibmap.get(lib, None)
+ if recipe:
+ deps.append(recipe)
+ elif recipe is None:
+ if header:
+ RecipeHandler.load_devel_filemap(d)
+ recipe = RecipeHandler.recipeheadermap.get(header, None)
+ if recipe:
+ deps.append(recipe)
+ elif recipe is None:
+ unmappedlibs.append(lib)
+ else:
+ unmappedlibs.append(lib)
+
+ deps = set(deps).difference(set(ignoredeps))
+
+ if unmappedpc:
+ outlines.append('# NOTE: unable to map the following pkg-config dependencies: %s' % ' '.join(unmappedpc))
+ outlines.append('# (this is based on recipes that have previously been built and packaged)')
+
+ if unmappedlibs:
+ outlines.append('# NOTE: the following library dependencies are unknown, ignoring: %s' % ' '.join(list(set(unmappedlibs))))
+ outlines.append('# (this is based on recipes that have previously been built and packaged)')
+
+ if deps:
+ values['DEPENDS'] = ' '.join(deps)
+
+ @staticmethod
+ def genfunction(outlines, funcname, content, python=False, forcespace=False):
+ if python:
+ prefix = 'python '
+ else:
+ prefix = ''
+ outlines.append('%s%s () {' % (prefix, funcname))
+ if python or forcespace:
+ indent = ' '
+ else:
+ indent = '\t'
+ addnoop = not python
+ for line in content:
+ outlines.append('%s%s' % (indent, line))
+ if addnoop:
+ strippedline = line.lstrip()
+ if strippedline and not strippedline.startswith('#'):
+ addnoop = False
+ if addnoop:
+ # Without this there'll be a syntax error
+ outlines.append('%s:' % indent)
+ outlines.append('}')
+ outlines.append('')
+
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ return False
+
+
+def validate_pv(pv):
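+    # A usable PV must be non-empty, must not contain '_version' (which
+    # suggests an unsubstituted placeholder) and must start with a digit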
+ if not pv or '_version' in pv.lower() or pv[0] not in '0123456789':
+ return False
+ return True
+
+def determine_from_filename(srcfile):
+ """Determine name and version from a filename"""
+ if is_package(srcfile):
+ # Force getting the value from the package metadata
+ return None, None
+
+ if '.tar.' in srcfile:
+ namepart = srcfile.split('.tar.')[0]
+ else:
+ namepart = os.path.splitext(srcfile)[0]
+ namepart = namepart.lower().replace('_', '-')
+ if namepart.endswith('.src'):
+ namepart = namepart[:-4]
+ if namepart.endswith('.orig'):
+ namepart = namepart[:-5]
+ splitval = namepart.split('-')
+ logger.debug('determine_from_filename: split name %s into: %s' % (srcfile, splitval))
+
+ ver_re = re.compile('^v?[0-9]')
+
+ pv = None
+ pn = None
+ if len(splitval) == 1:
+ # Try to split the version out if there is no separator (or a .)
+ res = re.match('^([^0-9]+)([0-9.]+.*)$', namepart)
+ if res:
+ if len(res.group(1)) > 1 and len(res.group(2)) > 1:
+ pn = res.group(1).rstrip('.')
+ pv = res.group(2)
+ else:
+ pn = namepart
+ else:
+ if splitval[-1] in ['source', 'src']:
+ splitval.pop()
+ if len(splitval) > 2 and re.match('^(alpha|beta|stable|release|rc[0-9]|pre[0-9]|p[0-9]|[0-9]{8})', splitval[-1]) and ver_re.match(splitval[-2]):
+ pv = '-'.join(splitval[-2:])
+ if pv.endswith('-release'):
+ pv = pv[:-8]
+ splitval = splitval[:-2]
+ elif ver_re.match(splitval[-1]):
+ pv = splitval.pop()
+ pn = '-'.join(splitval)
+ if pv and pv.startswith('v'):
+ pv = pv[1:]
+ logger.debug('determine_from_filename: name = "%s" version = "%s"' % (pn, pv))
+ return (pn, pv)
+
+def determine_from_url(srcuri):
+ """Determine name and version from a URL"""
+ pn = None
+ pv = None
+ parseres = urlparse(srcuri.lower().split(';', 1)[0])
+ if parseres.path:
+ if 'github.com' in parseres.netloc:
+ res = re.search(r'.*/(.*?)/archive/(.*)-final\.(tar|zip)', parseres.path)
+ if res:
+ pn = res.group(1).strip().replace('_', '-')
+ pv = res.group(2).strip().replace('_', '.')
+ else:
+ res = re.search(r'.*/(.*?)/archive/v?(.*)\.(tar|zip)', parseres.path)
+ if res:
+ pn = res.group(1).strip().replace('_', '-')
+ pv = res.group(2).strip().replace('_', '.')
+ elif 'bitbucket.org' in parseres.netloc:
+ res = re.search(r'.*/(.*?)/get/[a-zA-Z_-]*([0-9][0-9a-zA-Z_.]*)\.(tar|zip)', parseres.path)
+ if res:
+ pn = res.group(1).strip().replace('_', '-')
+ pv = res.group(2).strip().replace('_', '.')
+
+ if not pn and not pv:
+ if parseres.scheme not in ['git', 'gitsm', 'svn', 'hg']:
+ srcfile = os.path.basename(parseres.path.rstrip('/'))
+ pn, pv = determine_from_filename(srcfile)
+ elif parseres.scheme in ['git', 'gitsm']:
+ pn = os.path.basename(parseres.path.rstrip('/')).lower().replace('_', '-')
+ if pn.endswith('.git'):
+ pn = pn[:-4]
+
+ logger.debug('Determined from source URL: name = "%s", version = "%s"' % (pn, pv))
+ return (pn, pv)
+
+def supports_srcrev(uri):
+ localdata = bb.data.createCopy(tinfoil.config_data)
+ # This is a bit sad, but if you don't have this set there can be some
+ # odd interactions with the urldata cache which lead to errors
+ localdata.setVar('SRCREV', '${AUTOREV}')
+ try:
+ fetcher = bb.fetch2.Fetch([uri], localdata)
+ urldata = fetcher.ud
+ for u in urldata:
+ if urldata[u].method.supports_srcrev():
+ return True
+ except bb.fetch2.FetchError as e:
+ logger.debug('FetchError in supports_srcrev: %s' % str(e))
+ # Fall back to basic check
+ if uri.startswith(('git://', 'gitsm://')):
+ return True
+ return False
+
+def reformat_git_uri(uri):
+ '''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
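+    # e.g. https://host.example.com/path/repo.git (an illustrative URL) would
+    # become git://host.example.com/path/repo.git;protocol=https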
+ checkuri = uri.split(';', 1)[0]
+ if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
+            # Append the scheme if it is missing
+ if not '://' in uri:
+ uri = 'git://' + uri
+ scheme, host, path, user, pswd, parms = bb.fetch2.decodeurl(uri)
+        # Detection mechanism: this is required because certain URLs are
+        # formatted with ":" rather than "/", which causes decodeurl to fail to
+        # extract the correct host and path
+ if len(host.split(':')) > 1:
+ splitslash = host.split(':')
+ # Port number should not be split from host
+ if not re.match('^[0-9]+$', splitslash[1]):
+ host = splitslash[0]
+ path = '/' + splitslash[1] + path
+        # Algorithm:
+        # - if a user is defined and no protocol parameter is present, append
+        #   protocol=ssh (otherwise honour the protocol already defined)
+        # - if no user is defined, the scheme is http/https/ssh and no protocol
+        #   parameter is present, append a protocol parameter matching the scheme
+        # - otherwise the URL is already well-formed, so do nothing
+        # Need to repackage the arguments for encodeurl; the format is: (scheme, host, path, user, password, OrderedDict([('key', 'value')]))
+ if user:
+ if not 'protocol' in parms:
+ parms.update({('protocol', 'ssh')})
+ elif (scheme == "http" or scheme == 'https' or scheme == 'ssh') and not ('protocol' in parms):
+ parms.update({('protocol', scheme)})
+ # Always append 'git://'
+ fUrl = bb.fetch2.encodeurl(('git', host, path, user, pswd, parms))
+ return fUrl
+ else:
+ return uri
+
+def is_package(url):
+ '''Check if a URL points to a package'''
+ checkurl = url.split(';', 1)[0]
+ if checkurl.endswith(('.deb', '.ipk', '.rpm', '.srpm')):
+ return True
+ return False
+
+def create_recipe(args):
+ import bb.process
+ import tempfile
+ import shutil
+ import oe.recipeutils
+
+ pkgarch = ""
+ if args.machine:
+ pkgarch = "${MACHINE_ARCH}"
+
+ extravalues = {}
+ checksums = {}
+ tempsrc = ''
+ source = args.source
+ srcsubdir = ''
+ srcrev = '${AUTOREV}'
+ srcbranch = ''
+ scheme = ''
+ storeTagName = ''
+ pv_srcpv = False
+
+ if os.path.isfile(source):
+ source = 'file://%s' % os.path.abspath(source)
+
+ if scriptutils.is_src_url(source):
+ # Warn about github archive URLs
+ if re.match('https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
+ logger.warn('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
+ # Fetch a URL
+ fetchuri = reformat_git_uri(urldefrag(source)[0])
+ if args.binary:
+ # Assume the archive contains the directory structure verbatim
+ # so we need to extract to a subdirectory
+ fetchuri += ';subdir=${BP}'
+ srcuri = fetchuri
+ rev_re = re.compile(';rev=([^;]+)')
+ res = rev_re.search(srcuri)
+ if res:
+ if args.srcrev:
+ logger.error('rev= parameter and -S/--srcrev option cannot both be specified - use one or the other')
+ sys.exit(1)
+ if args.autorev:
+ logger.error('rev= parameter and -a/--autorev option cannot both be specified - use one or the other')
+ sys.exit(1)
+ srcrev = res.group(1)
+ srcuri = rev_re.sub('', srcuri)
+ elif args.srcrev:
+ srcrev = args.srcrev
+
+        # Check whether the user provided any branch info in fetchuri.
+        # If so, skip all of the branch checking below to honour the user's input.
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(fetchuri)
+ srcbranch = params.get('branch')
+ if args.srcbranch:
+ if srcbranch:
+ logger.error('branch= parameter and -B/--srcbranch option cannot both be specified - use one or the other')
+ sys.exit(1)
+ srcbranch = args.srcbranch
+ nobranch = params.get('nobranch')
+ if nobranch and srcbranch:
+ logger.error('nobranch= cannot be used if you specify a branch')
+ sys.exit(1)
+ tag = params.get('tag')
+ if not srcbranch and not nobranch and srcrev != '${AUTOREV}':
+ # Append nobranch=1 in the following conditions:
+ # 1. User did not set 'branch=' in srcuri, and
+ # 2. User did not set 'nobranch=1' in srcuri, and
+ # 3. Source revision is not '${AUTOREV}'
+ params['nobranch'] = '1'
+ if tag:
+                # Keep a copy of the tag, append nobranch=1 and then remove the tag from the URL.
+                # The bitbake fetcher is unable to fetch when ${AUTOREV} and a tag are set at the same time.
+ storeTagName = params['tag']
+ params['nobranch'] = '1'
+ del params['tag']
+ if scheme == 'npm':
+ params['noverify'] = '1'
+ fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
+
+ tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
+ bb.utils.mkdirhier(tmpparent)
+ tempsrc = tempfile.mkdtemp(prefix='recipetool-', dir=tmpparent)
+ srctree = os.path.join(tempsrc, 'source')
+
+ try:
+ checksums, ftmpdir = scriptutils.fetch_url(tinfoil, fetchuri, srcrev, srctree, logger, preserve_tmp=args.keep_temp)
+ except scriptutils.FetchUrlFailure as e:
+ logger.error(str(e))
+ sys.exit(1)
+
+ if ftmpdir and args.keep_temp:
+ logger.info('Fetch temp directory is %s' % ftmpdir)
+
+ dirlist = os.listdir(srctree)
+ filterout = ['git.indirectionsymlink']
+ dirlist = [x for x in dirlist if x not in filterout]
+ logger.debug('Directory listing (excluding filtered out):\n %s' % '\n '.join(dirlist))
+ if len(dirlist) == 1:
+ singleitem = os.path.join(srctree, dirlist[0])
+ if os.path.isdir(singleitem):
+ # We unpacked a single directory, so we should use that
+ srcsubdir = dirlist[0]
+ srctree = os.path.join(srctree, srcsubdir)
+ else:
+ check_single_file(dirlist[0], fetchuri)
+ elif len(dirlist) == 0:
+ if '/' in fetchuri:
+ fn = os.path.join(tinfoil.config_data.getVar('DL_DIR'), fetchuri.split('/')[-1])
+ if os.path.isfile(fn):
+ check_single_file(fn, fetchuri)
+ # If we've got to here then there's no source so we might as well give up
+ logger.error('URL %s resulted in an empty source tree' % fetchuri)
+ sys.exit(1)
+
+    # We need this check so that the recipe created by recipetool/devtool
+    # can be parsed and built by bitbake.
+    # If the user gave no branch name, determine the branch from the SRCREV provided.
+ if not srcbranch and not nobranch and srcrev and (srcrev != '${AUTOREV}') and scheme in ['git', 'gitsm']:
+ try:
+ cmd = 'git branch -r --contains'
+ check_branch, check_branch_err = bb.process.run('%s %s' % (cmd, srcrev), cwd=srctree)
+ except bb.process.ExecutionError as err:
+ logger.error(str(err))
+ sys.exit(1)
+ get_branch = [x.strip() for x in check_branch.splitlines()]
+ # Remove HEAD reference point and drop remote prefix
+ get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
+ if 'master' in get_branch:
+                # If it is master, we do not need to append 'branch=master' as this is the default.
+                # Even in the case where get_branch has multiple entries, if 'master' is one
+                # of them we should default to taking from 'master'
+ srcbranch = ''
+ elif len(get_branch) == 1:
+                # If 'master' isn't in get_branch and get_branch contains only one entry, store it in 'srcbranch'
+ srcbranch = get_branch[0]
+ else:
+                # If get_branch contains more than one entry, display an error and exit.
+ mbrch = '\n ' + '\n '.join(get_branch)
+ logger.error('Revision %s was found on multiple branches: %s\nPlease provide the correct branch with -B/--srcbranch' % (srcrev, mbrch))
+ sys.exit(1)
+
+    # Since we might have a value in srcbranch, we need to
+    # reconstruct the srcuri to include 'branch' in its params.
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(srcuri)
+ if srcbranch:
+ params['branch'] = srcbranch
+
+ if storeTagName and scheme in ['git', 'gitsm']:
+ # Check srcrev using tag and check validity of the tag
+ cmd = ('git rev-parse --verify %s' % (storeTagName))
+ try:
+ check_tag, check_tag_err = bb.process.run('%s' % cmd, cwd=srctree)
+ srcrev = check_tag.split()[0]
+ except bb.process.ExecutionError as err:
+ logger.error(str(err))
+ logger.error("Possibly wrong tag name is provided")
+ sys.exit(1)
+ # Drop tag from srcuri as it will have conflicts with SRCREV during recipe parse.
+ del params['tag']
+ srcuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
+
+ if os.path.exists(os.path.join(srctree, '.gitmodules')) and srcuri.startswith('git://'):
+ srcuri = 'gitsm://' + srcuri[6:]
+ logger.info('Fetching submodules...')
+ bb.process.run('git submodule update --init --recursive', cwd=srctree)
+
+ if is_package(fetchuri):
+ localdata = bb.data.createCopy(tinfoil.config_data)
+ pkgfile = bb.fetch2.localpath(fetchuri, localdata)
+ if pkgfile:
+ tmpfdir = tempfile.mkdtemp(prefix='recipetool-')
+ try:
+ if pkgfile.endswith(('.deb', '.ipk')):
+ stdout, _ = bb.process.run('ar x %s' % pkgfile, cwd=tmpfdir)
+ stdout, _ = bb.process.run('tar xf control.tar.gz', cwd=tmpfdir)
+ values = convert_debian(tmpfdir)
+ extravalues.update(values)
+ elif pkgfile.endswith(('.rpm', '.srpm')):
+ stdout, _ = bb.process.run('rpm -qp --xml %s > pkginfo.xml' % pkgfile, cwd=tmpfdir)
+ values = convert_rpm_xml(os.path.join(tmpfdir, 'pkginfo.xml'))
+ extravalues.update(values)
+ finally:
+ shutil.rmtree(tmpfdir)
+ else:
+ # Assume we're pointing to an existing source tree
+ if args.extract_to:
+ logger.error('--extract-to cannot be specified if source is a directory')
+ sys.exit(1)
+ if not os.path.isdir(source):
+ logger.error('Invalid source directory %s' % source)
+ sys.exit(1)
+ srctree = source
+ srcuri = ''
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Try to get upstream repo location from origin remote
+ try:
+ stdout, _ = bb.process.run('git remote -v', cwd=srctree, shell=True)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ for line in stdout.splitlines():
+ splitline = line.split()
+ if len(splitline) > 1:
+ if splitline[0] == 'origin' and scriptutils.is_src_url(splitline[1]):
+ srcuri = reformat_git_uri(splitline[1])
+ srcsubdir = 'git'
+ break
+
+ if args.src_subdir:
+ srcsubdir = os.path.join(srcsubdir, args.src_subdir)
+ srctree_use = os.path.abspath(os.path.join(srctree, args.src_subdir))
+ else:
+ srctree_use = os.path.abspath(srctree)
+
+ if args.outfile and os.path.isdir(args.outfile):
+ outfile = None
+ outdir = args.outfile
+ else:
+ outfile = args.outfile
+ outdir = None
+ if outfile and outfile != '-':
+ if os.path.exists(outfile):
+ logger.error('Output file %s already exists' % outfile)
+ sys.exit(1)
+
+ lines_before = []
+ lines_after = []
+
+ lines_before.append('# Recipe created by %s' % os.path.basename(sys.argv[0]))
+ lines_before.append('# This is the basis of a recipe and may need further editing in order to be fully functional.')
+ lines_before.append('# (Feel free to remove these comments when editing.)')
+ # We need a blank line here so that patch_recipe_lines can rewind before the LICENSE comments
+ lines_before.append('')
+
+ # We'll come back and replace this later in handle_license_vars()
+ lines_before.append('##LICENSE_PLACEHOLDER##')
+
+ handled = []
+ classes = []
+
+ # FIXME This is kind of a hack, we probably ought to be using bitbake to do this
+ pn = None
+ pv = None
+ if outfile:
+ recipefn = os.path.splitext(os.path.basename(outfile))[0]
+ fnsplit = recipefn.split('_')
+ if len(fnsplit) > 1:
+ pn = fnsplit[0]
+ pv = fnsplit[1]
+ else:
+ pn = recipefn
+
+ if args.version:
+ pv = args.version
+
+ if args.name:
+ pn = args.name
+ if args.name.endswith('-native'):
+ if args.also_native:
+ logger.error('--also-native cannot be specified for a recipe named *-native (*-native denotes a recipe that is already only for native) - either remove the -native suffix from the name or drop --also-native')
+ sys.exit(1)
+ classes.append('native')
+ elif args.name.startswith('nativesdk-'):
+ if args.also_native:
+ logger.error('--also-native cannot be specified for a recipe named nativesdk-* (nativesdk-* denotes a recipe that is already only for nativesdk)')
+ sys.exit(1)
+ classes.append('nativesdk')
+
+ if pv and pv not in 'git svn hg'.split():
+ realpv = pv
+ else:
+ realpv = None
+
+ if not srcuri:
+ lines_before.append('# No information for SRC_URI yet (only an external source tree was specified)')
+ lines_before.append('SRC_URI = "%s"' % srcuri)
+ for key, value in sorted(checksums.items()):
+ lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
+ if srcuri and supports_srcrev(srcuri):
+ lines_before.append('')
+ lines_before.append('# Modify these as desired')
+ # Note: we have code to replace realpv further down if it gets set to some other value
+ scheme, _, _, _, _, _ = bb.fetch2.decodeurl(srcuri)
+ if scheme in ['git', 'gitsm']:
+ srcpvprefix = 'git'
+ elif scheme == 'svn':
+ srcpvprefix = 'svnr'
+ else:
+ srcpvprefix = scheme
+ lines_before.append('PV = "%s+%s${SRCPV}"' % (realpv or '1.0', srcpvprefix))
+ pv_srcpv = True
+ if not args.autorev and srcrev == '${AUTOREV}':
+ if os.path.exists(os.path.join(srctree, '.git')):
+ (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
+ srcrev = stdout.rstrip()
+ lines_before.append('SRCREV = "%s"' % srcrev)
+ if args.provides:
+ lines_before.append('PROVIDES = "%s"' % args.provides)
+ lines_before.append('')
+
+ if srcsubdir and not args.binary:
+ # (for binary packages we explicitly specify subdir= when fetching to
+ # match the default value of S, so we don't need to set it in that case)
+ lines_before.append('S = "${WORKDIR}/%s"' % srcsubdir)
+ lines_before.append('')
+
+ if pkgarch:
+ lines_after.append('PACKAGE_ARCH = "%s"' % pkgarch)
+ lines_after.append('')
+
+ if args.binary:
+ lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
+ lines_after.append('')
+
+ if args.fetch_dev:
+ extravalues['fetchdev'] = True
+ else:
+ extravalues['fetchdev'] = None
+
+ # Find all plugins that want to register handlers
+ logger.debug('Loading recipe handlers')
+ raw_handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_recipe_handlers'):
+ plugin.register_recipe_handlers(raw_handlers)
+ # Sort handlers by priority
+ handlers = []
+ for i, handler in enumerate(raw_handlers):
+ if isinstance(handler, tuple):
+ handlers.append((handler[0], handler[1], i))
+ else:
+ handlers.append((handler, 0, i))
+ handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
+ for handler, priority, _ in handlers:
+ logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
+ setattr(handler, '_devtool', args.devtool)
+ handlers = [item[0] for item in handlers]
+
+ # Apply the handlers
+ if args.binary:
+ classes.append('bin_package')
+ handled.append('buildsystem')
+
+ for handler in handlers:
+ handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+
+ extrafiles = extravalues.pop('extrafiles', {})
+ extra_pn = extravalues.pop('PN', None)
+ extra_pv = extravalues.pop('PV', None)
+
+ if extra_pv and not realpv:
+ realpv = extra_pv
+ if not validate_pv(realpv):
+ realpv = None
+ else:
+ realpv = realpv.lower().split()[0]
+ if '_' in realpv:
+ realpv = realpv.replace('_', '-')
+ if extra_pn and not pn:
+ pn = extra_pn
+ if pn.startswith('GNU '):
+ pn = pn[4:]
+ if ' ' in pn:
+ # Probably a descriptive identifier rather than a proper name
+ pn = None
+ else:
+ pn = pn.lower()
+ if '_' in pn:
+ pn = pn.replace('_', '-')
+
+ if srcuri and not realpv or not pn:
+ name_pn, name_pv = determine_from_url(srcuri)
+ if name_pn and not pn:
+ pn = name_pn
+ if name_pv and not realpv:
+ realpv = name_pv
+
+ licvalues = handle_license_vars(srctree_use, lines_before, handled, extravalues, tinfoil.config_data)
+
+ if not outfile:
+ if not pn:
+ log_error_cond('Unable to determine short program name from source tree - please specify name with -N/--name or output file name with -o/--outfile', args.devtool)
+ # devtool looks for this specific exit code, so don't change it
+ sys.exit(15)
+ else:
+ if srcuri and srcuri.startswith(('gitsm://', 'git://', 'hg://', 'svn://')):
+ suffix = srcuri.split(':', 1)[0]
+ if suffix == 'gitsm':
+ suffix = 'git'
+ outfile = '%s_%s.bb' % (pn, suffix)
+ elif realpv:
+ outfile = '%s_%s.bb' % (pn, realpv)
+ else:
+ outfile = '%s.bb' % pn
+ if outdir:
+ outfile = os.path.join(outdir, outfile)
+ # We need to check this again
+ if os.path.exists(outfile):
+ logger.error('Output file %s already exists' % outfile)
+ sys.exit(1)
+
+ # Move any extra files the plugins created to a directory next to the recipe
+ if extrafiles:
+ if outfile == '-':
+ extraoutdir = pn
+ else:
+ extraoutdir = os.path.join(os.path.dirname(outfile), pn)
+ bb.utils.mkdirhier(extraoutdir)
+ for destfn, extrafile in extrafiles.items():
+ shutil.move(extrafile, os.path.join(extraoutdir, destfn))
+
+ lines = lines_before
+ lines_before = []
+ skipblank = True
+ for line in lines:
+ if skipblank:
+ skipblank = False
+ if not line:
+ continue
+ if line.startswith('S = '):
+ if realpv and pv not in 'git svn hg'.split():
+ line = line.replace(realpv, '${PV}')
+ if pn:
+ line = line.replace(pn, '${BPN}')
+ if line == 'S = "${WORKDIR}/${BPN}-${PV}"':
+ skipblank = True
+ continue
+ elif line.startswith('SRC_URI = '):
+ if realpv and not pv_srcpv:
+ line = line.replace(realpv, '${PV}')
+ elif line.startswith('PV = '):
+ if realpv:
+ # Replace the first part of the PV value
+ line = re.sub('"[^+]*\+', '"%s+' % realpv, line)
+ lines_before.append(line)
+
+ if args.also_native:
+ lines = lines_after
+ lines_after = []
+ bbclassextend = None
+ for line in lines:
+ if line.startswith('BBCLASSEXTEND ='):
+ splitval = line.split('"')
+ if len(splitval) > 1:
+ bbclassextend = splitval[1].split()
+ if not 'native' in bbclassextend:
+ bbclassextend.insert(0, 'native')
+ line = 'BBCLASSEXTEND = "%s"' % ' '.join(bbclassextend)
+ lines_after.append(line)
+ if not bbclassextend:
+ lines_after.append('BBCLASSEXTEND = "native"')
+
+ postinst = ("postinst", extravalues.pop('postinst', None))
+ postrm = ("postrm", extravalues.pop('postrm', None))
+ preinst = ("preinst", extravalues.pop('preinst', None))
+ prerm = ("prerm", extravalues.pop('prerm', None))
+ funcs = [postinst, postrm, preinst, prerm]
+ for func in funcs:
+ if func[1]:
+ RecipeHandler.genfunction(lines_after, 'pkg_%s_${PN}' % func[0], func[1])
+
+ outlines = []
+ outlines.extend(lines_before)
+ if classes:
+ if outlines[-1] and not outlines[-1].startswith('#'):
+ outlines.append('')
+ outlines.append('inherit %s' % ' '.join(classes))
+ outlines.append('')
+ outlines.extend(lines_after)
+
+ if extravalues:
+ _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=False)
+
+ if args.extract_to:
+ scriptutils.git_convert_standalone_clone(srctree)
+ if os.path.isdir(args.extract_to):
+ # If the directory exists we'll move the temp dir into it instead of
+ # its contents - of course, we could try to always move its contents
+ # but that is a pain if there are symlinks; the simplest solution is
+ # to just remove it first
+ os.rmdir(args.extract_to)
+ shutil.move(srctree, args.extract_to)
+ if tempsrc == srctree:
+ tempsrc = None
+ log_info_cond('Source extracted to %s' % args.extract_to, args.devtool)
+
+ if outfile == '-':
+ sys.stdout.write('\n'.join(outlines) + '\n')
+ else:
+ with open(outfile, 'w') as f:
+ lastline = None
+ for line in outlines:
+ if not lastline and not line:
+ # Skip extra blank lines
+ continue
+ f.write('%s\n' % line)
+ lastline = line
+ log_info_cond('Recipe %s has been created; further editing may be required to make it fully functional' % outfile, args.devtool)
+
+ if tempsrc:
+ if args.keep_temp:
+ logger.info('Preserving temporary directory %s' % tempsrc)
+ else:
+ shutil.rmtree(tempsrc)
+
+ return 0
+
+def check_single_file(fn, fetchuri):
+ """Determine if a single downloaded file is something we can't handle"""
+ with open(fn, 'r', errors='surrogateescape') as f:
+ if '<html' in f.read(100).lower():
+ logger.error('Fetching "%s" returned a single HTML page - check the URL is correct and functional' % fetchuri)
+ sys.exit(1)
+
+def split_value(value):
+ if isinstance(value, str):
+ return value.split()
+ else:
+ return value
+
+def handle_license_vars(srctree, lines_before, handled, extravalues, d):
+ lichandled = [x for x in handled if x[0] == 'license']
+ if lichandled:
+ # Someone else has already handled the license vars, just return their value
+ return lichandled[0][1]
+
+ licvalues = guess_license(srctree, d)
+ licenses = []
+ lic_files_chksum = []
+ lic_unknown = []
+ lines = []
+ if licvalues:
+ for licvalue in licvalues:
+ if not licvalue[0] in licenses:
+ licenses.append(licvalue[0])
+ lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
+ if licvalue[0] == 'Unknown':
+ lic_unknown.append(licvalue[1])
+ if lic_unknown:
+ lines.append('#')
+        lines.append('# The following license files could not be identified and are')
+        lines.append('# represented as "Unknown" below; you will need to check them yourself:')
+ for licfile in lic_unknown:
+ lines.append('# %s' % licfile)
+
+ extra_license = split_value(extravalues.pop('LICENSE', []))
+ if '&' in extra_license:
+ extra_license.remove('&')
+ if extra_license:
+ if licenses == ['Unknown']:
+ licenses = extra_license
+ else:
+ for item in extra_license:
+ if item not in licenses:
+ licenses.append(item)
+ extra_lic_files_chksum = split_value(extravalues.pop('LIC_FILES_CHKSUM', []))
+ for item in extra_lic_files_chksum:
+ if item not in lic_files_chksum:
+ lic_files_chksum.append(item)
+
+ if lic_files_chksum:
+ # We are going to set the vars, so prepend the standard disclaimer
+ lines.insert(0, '# WARNING: the following LICENSE and LIC_FILES_CHKSUM values are best guesses - it is')
+ lines.insert(1, '# your responsibility to verify that the values are complete and correct.')
+ else:
+ # Without LIC_FILES_CHKSUM we set LICENSE = "CLOSED" to allow the
+ # user to get started easily
+ lines.append('# Unable to find any files that looked like license statements. Check the accompanying')
+ lines.append('# documentation and source headers and set LICENSE and LIC_FILES_CHKSUM accordingly.')
+ lines.append('#')
+ lines.append('# NOTE: LICENSE is being set to "CLOSED" to allow you to at least start building - if')
+ lines.append('# this is not accurate with respect to the licensing of the software being built (it')
+ lines.append('# will not be in most cases) you must specify the correct value before using this')
+ lines.append('# recipe for anything other than initial testing/development!')
+ licenses = ['CLOSED']
+
+ if extra_license and sorted(licenses) != sorted(extra_license):
+ lines.append('# NOTE: Original package / source metadata indicates license is: %s' % ' & '.join(extra_license))
+
+ if len(licenses) > 1:
+ lines.append('#')
+ lines.append('# NOTE: multiple licenses have been detected; they have been separated with &')
+ lines.append('# in the LICENSE value for now since it is a reasonable assumption that all')
+ lines.append('# of the licenses apply. If instead there is a choice between the multiple')
+ lines.append('# licenses then you should change the value to separate the licenses with |')
+ lines.append('# instead of &. If there is any doubt, check the accompanying documentation')
+ lines.append('# to determine which situation is applicable.')
+
+ lines.append('LICENSE = "%s"' % ' & '.join(licenses))
+ lines.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
+ lines.append('')
+
+ # Replace the placeholder so we get the values in the right place in the recipe file
+ try:
+ pos = lines_before.index('##LICENSE_PLACEHOLDER##')
+ except ValueError:
+ pos = -1
+ if pos == -1:
+ lines_before.extend(lines)
+ else:
+ lines_before[pos:pos+1] = lines
+
+ handled.append(('license', licvalues))
+ return licvalues
+
+def get_license_md5sums(d, static_only=False):
+ import bb.utils
+ md5sums = {}
+ if not static_only:
+ # Gather md5sums of license files in common license dir
+ commonlicdir = d.getVar('COMMON_LICENSE_DIR')
+ for fn in os.listdir(commonlicdir):
+ md5value = bb.utils.md5_file(os.path.join(commonlicdir, fn))
+ md5sums[md5value] = fn
+ # The following were extracted from common values in various recipes
+ # (double checking the license against the license file itself, not just
+ # the LICENSE value in the recipe)
+ md5sums['94d55d512a9ba36caa9b7df079bae19f'] = 'GPLv2'
+ md5sums['b234ee4d69f5fce4486a80fdaf4a4263'] = 'GPLv2'
+ md5sums['59530bdf33659b29e73d4adb9f9f6552'] = 'GPLv2'
+ md5sums['0636e73ff0215e8d672dc4c32c317bb3'] = 'GPLv2'
+ md5sums['eb723b61539feef013de476e68b5c50a'] = 'GPLv2'
+ md5sums['751419260aa954499f7abaabaa882bbe'] = 'GPLv2'
+ md5sums['393a5ca445f6965873eca0259a17f833'] = 'GPLv2'
+ md5sums['12f884d2ae1ff87c09e5b7ccc2c4ca7e'] = 'GPLv2'
+ md5sums['8ca43cbc842c2336e835926c2166c28b'] = 'GPLv2'
+ md5sums['ebb5c50ab7cab4baeffba14977030c07'] = 'GPLv2'
+ md5sums['c93c0550bd3173f4504b2cbd8991e50b'] = 'GPLv2'
+ md5sums['9ac2e7cff1ddaf48b6eab6028f23ef88'] = 'GPLv2'
+ md5sums['4325afd396febcb659c36b49533135d4'] = 'GPLv2'
+ md5sums['18810669f13b87348459e611d31ab760'] = 'GPLv2'
+ md5sums['d7810fab7487fb0aad327b76f1be7cd7'] = 'GPLv2' # the Linux kernel's COPYING file
+ md5sums['bbb461211a33b134d42ed5ee802b37ff'] = 'LGPLv2.1'
+ md5sums['7fbc338309ac38fefcd64b04bb903e34'] = 'LGPLv2.1'
+ md5sums['4fbd65380cdd255951079008b364516c'] = 'LGPLv2.1'
+ md5sums['2d5025d4aa3495befef8f17206a5b0a1'] = 'LGPLv2.1'
+ md5sums['fbc093901857fcd118f065f900982c24'] = 'LGPLv2.1'
+ md5sums['a6f89e2100d9b6cdffcea4f398e37343'] = 'LGPLv2.1'
+ md5sums['d8045f3b8f929c1cb29a1e3fd737b499'] = 'LGPLv2.1'
+ md5sums['fad9b3332be894bab9bc501572864b29'] = 'LGPLv2.1'
+ md5sums['3bf50002aefd002f49e7bb854063f7e7'] = 'LGPLv2'
+ md5sums['9f604d8a4f8e74f4f5140845a21b6674'] = 'LGPLv2'
+ md5sums['5f30f0716dfdd0d91eb439ebec522ec2'] = 'LGPLv2'
+ md5sums['55ca817ccb7d5b5b66355690e9abc605'] = 'LGPLv2'
+ md5sums['252890d9eee26aab7b432e8b8a616475'] = 'LGPLv2'
+ md5sums['3214f080875748938ba060314b4f727d'] = 'LGPLv2'
+ md5sums['db979804f025cf55aabec7129cb671ed'] = 'LGPLv2'
+ md5sums['d32239bcb673463ab874e80d47fae504'] = 'GPLv3'
+ md5sums['f27defe1e96c2e1ecd4e0c9be8967949'] = 'GPLv3'
+ md5sums['6a6a8e020838b23406c81b19c1d46df6'] = 'LGPLv3'
+ md5sums['3b83ef96387f14655fc854ddc3c6bd57'] = 'Apache-2.0'
+ md5sums['385c55653886acac3821999a3ccd17b3'] = 'Artistic-1.0 | GPL-2.0' # some perl modules
+ md5sums['54c7042be62e169199200bc6477f04d1'] = 'BSD-3-Clause'
+ return md5sums
+
+def crunch_license(licfile):
+ '''
+ Remove non-material text from a license file and then check
+ its md5sum against a known list. This works well for licenses
+ which contain a copyright statement, but is also a useful way
+ to handle people's insistence upon reformatting the license text
+ slightly (with no material difference to the text of the
+ license).
+ '''
+
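+ # As a purely illustrative example, calling this on a reflowed copy of the
+ # GPLv2 text might give:
+ # license, md5val, lictext = crunch_license('COPYING')
+ # -> ('GPLv2', <md5 of the squashed text>, [list of normalised lines])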
+ import oe.utils
+
+ # Note: these are carefully constructed!
+ license_title_re = re.compile(r'^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
+ license_statement_re = re.compile(r'^(This (project|software) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
+ copyright_re = re.compile(r'^(#+)? *Copyright .*$')
+
+ crunched_md5sums = {}
+ # The following two were gleaned from the "forever" npm package
+ crunched_md5sums['0a97f8e4cbaf889d6fa51f84b89a79f6'] = 'ISC'
+ crunched_md5sums['eecf6429523cbc9693547cf2db790b5c'] = 'MIT'
+ # https://github.com/vasi/pixz/blob/master/LICENSE
+ crunched_md5sums['2f03392b40bbe663597b5bd3cc5ebdb9'] = 'BSD-2-Clause'
+ # https://github.com/waffle-gl/waffle/blob/master/LICENSE.txt
+ crunched_md5sums['e72e5dfef0b1a4ca8a3d26a60587db66'] = 'BSD-2-Clause'
+ # https://github.com/spigwitmer/fakeds1963s/blob/master/LICENSE
+ crunched_md5sums['8be76ac6d191671f347ee4916baa637e'] = 'GPLv2'
+ # https://github.com/datto/dattobd/blob/master/COPYING
+ # http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/GPLv2.TXT
+ crunched_md5sums['1d65c5ad4bf6489f85f4812bf08ae73d'] = 'GPLv2'
+ # http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+ # http://git.neil.brown.name/?p=mdadm.git;a=blob;f=COPYING;h=d159169d1050894d3ea3b98e1c965c4058208fe1;hb=HEAD
+ crunched_md5sums['fb530f66a7a89ce920f0e912b5b66d4b'] = 'GPLv2'
+ # https://github.com/gkos/nrf24/blob/master/COPYING
+ crunched_md5sums['7b6aaa4daeafdfa6ed5443fd2684581b'] = 'GPLv2'
+ # https://github.com/josch09/resetusb/blob/master/COPYING
+ crunched_md5sums['8b8ac1d631a4d220342e83bcf1a1fbc3'] = 'GPLv3'
+ # https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv2.1
+ crunched_md5sums['2ea316ed973ae176e502e2297b574bb3'] = 'LGPLv2.1'
+ # unixODBC-2.3.4 COPYING
+ crunched_md5sums['1daebd9491d1e8426900b4fa5a422814'] = 'LGPLv2.1'
+ # https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
+ crunched_md5sums['2ebfb3bb49b9a48a075cc1425e7f4129'] = 'LGPLv3'
+ # https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/epl-v10
+ crunched_md5sums['efe2cb9a35826992b9df68224e3c2628'] = 'EPL-1.0'
+ # https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/edl-v10
+ crunched_md5sums['0a9c78c0a398d1bbce4a166757d60387'] = 'EDL-1.0'
+ lictext = []
+ with open(licfile, 'r', errors='surrogateescape') as f:
+ for line in f:
+ # Drop opening statements
+ if copyright_re.match(line):
+ continue
+ elif license_title_re.match(line):
+ continue
+ elif license_statement_re.match(line):
+ continue
+ # Squash spaces, and replace smart quotes, double quotes
+ # and backticks with single quotes
+ line = oe.utils.squashspaces(line.strip())
+ line = line.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c","'").replace(u"\u201d", "'").replace('"', '\'').replace('`', '\'')
+ if line:
+ lictext.append(line)
+
+ m = hashlib.md5()
+ try:
+ m.update(' '.join(lictext).encode('utf-8'))
+ md5val = m.hexdigest()
+ except UnicodeEncodeError:
+ md5val = None
+ lictext = ''
+ license = crunched_md5sums.get(md5val, None)
+ return license, md5val, lictext
+
+def guess_license(srctree, d):
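+ '''
+ Find likely-looking license files in a source tree and return a list of
+ (license, relative path, md5sum) tuples, using 'Unknown' where the text
+ cannot be matched against the known md5sums
+ '''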
+ import bb
+ md5sums = get_license_md5sums(d)
+
+ licenses = []
+ licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*', 'e[dp]l-v10']
+ licfiles = []
+ for root, dirs, files in os.walk(srctree):
+ for fn in files:
+ for spec in licspecs:
+ if fnmatch.fnmatch(fn, spec):
+ fullpath = os.path.join(root, fn)
+ if fullpath not in licfiles:
+ licfiles.append(fullpath)
+ for licfile in licfiles:
+ md5value = bb.utils.md5_file(licfile)
+ license = md5sums.get(md5value, None)
+ if not license:
+ license, crunched_md5, lictext = crunch_license(licfile)
+ if not license:
+ license = 'Unknown'
+ licenses.append((license, os.path.relpath(licfile, srctree), md5value))
+
+ # FIXME should we grab at least one source file with a license header and add that too?
+
+ return licenses
+
+def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn='${PN}'):
+ """
+ Given a list of (license, path, md5sum) as returned by guess_license(),
+ a dict of package name to path mappings, write out a set of
+ package-specific LICENSE values.
+ """
+ pkglicenses = {pn: []}
+ for license, licpath, _ in licvalues:
+ for pkgname, pkgpath in packages.items():
+ if licpath.startswith(pkgpath + '/'):
+ if pkgname in pkglicenses:
+ pkglicenses[pkgname].append(license)
+ else:
+ pkglicenses[pkgname] = [license]
+ break
+ else:
+ # Accumulate on the main package
+ pkglicenses[pn].append(license)
+ outlicenses = {}
+ for pkgname in packages:
+ license = ' '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
+ if license == 'Unknown' and fallback_licenses and pkgname in fallback_licenses:
+ license = fallback_licenses[pkgname]
+ outlines.append('LICENSE_%s = "%s"' % (pkgname, license))
+ outlicenses[pkgname] = license.split()
+ return outlicenses
+
+def read_pkgconfig_provides(d):
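+ '''Build a map of pkg-config module name to providing recipe using pkgdata'''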
+ pkgdatadir = d.getVar('PKGDATA_DIR')
+ pkgmap = {}
+ for fn in glob.glob(os.path.join(pkgdatadir, 'shlibs2', '*.pclist')):
+ with open(fn, 'r') as f:
+ for line in f:
+ pkgmap[os.path.basename(line.rstrip())] = os.path.splitext(os.path.basename(fn))[0]
+ recipemap = {}
+ for pc, pkg in pkgmap.items():
+ pkgdatafile = os.path.join(pkgdatadir, 'runtime', pkg)
+ if os.path.exists(pkgdatafile):
+ with open(pkgdatafile, 'r') as f:
+ for line in f:
+ if line.startswith('PN: '):
+ recipemap[pc] = line.split(':', 1)[1].strip()
+ return recipemap
+
+def convert_debian(debpath):
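+ '''
+ Convert debian packaging metadata (the control file plus any maintainer
+ scripts) into a dict of recipe variable values
+ '''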
+ value_map = {'Package': 'PN',
+ 'Version': 'PV',
+ 'Section': 'SECTION',
+ 'License': 'LICENSE',
+ 'Homepage': 'HOMEPAGE'}
+
+ # FIXME extend this mapping - perhaps use distro_alias.inc?
+ depmap = {'libz-dev': 'zlib'}
+
+ values = {}
+ depends = []
+ with open(os.path.join(debpath, 'control'), 'r', errors='surrogateescape') as f:
+ indesc = False
+ for line in f:
+ if indesc:
+ if line.startswith(' '):
+ if line.startswith(' This package contains'):
+ indesc = False
+ else:
+ if 'DESCRIPTION' in values:
+ values['DESCRIPTION'] += ' ' + line.strip()
+ else:
+ values['DESCRIPTION'] = line.strip()
+ else:
+ indesc = False
+ if not indesc:
+ splitline = line.split(':', 1)
+ if len(splitline) < 2:
+ continue
+ key = splitline[0]
+ value = splitline[1].strip()
+ if key == 'Build-Depends':
+ for dep in value.split(','):
+ dep = dep.split()[0]
+ mapped = depmap.get(dep, '')
+ if mapped:
+ depends.append(mapped)
+ elif key == 'Description':
+ values['SUMMARY'] = value
+ indesc = True
+ else:
+ varname = value_map.get(key, None)
+ if varname:
+ values[varname] = value
+ postinst = os.path.join(debpath, 'postinst')
+ postrm = os.path.join(debpath, 'postrm')
+ preinst = os.path.join(debpath, 'preinst')
+ prerm = os.path.join(debpath, 'prerm')
+ sfiles = [postinst, postrm, preinst, prerm]
+ for sfile in sfiles:
+ if os.path.isfile(sfile):
+ logger.info("Converting %s file to recipe function..." %
+ os.path.basename(sfile).upper())
+ content = []
+ with open(sfile) as f:
+ for line in f:
+ if "#!/" in line:
+ continue
+ line = line.rstrip("\n")
+ if line.strip():
+ content.append(line)
+ if content:
+ values[os.path.basename(sfile)] = content
+
+ #if depends:
+ # values['DEPENDS'] = ' '.join(depends)
+
+ return values
+
+def convert_rpm_xml(xmlfile):
+ '''Converts the output from rpm -qp --xml to a set of variable values'''
+ import xml.etree.ElementTree as ElementTree
+ rpmtag_map = {'Name': 'PN',
+ 'Version': 'PV',
+ 'Summary': 'SUMMARY',
+ 'Description': 'DESCRIPTION',
+ 'License': 'LICENSE',
+ 'Url': 'HOMEPAGE'}
+
+ values = {}
+ tree = ElementTree.parse(xmlfile)
+ root = tree.getroot()
+ for child in root:
+ if child.tag == 'rpmTag':
+ name = child.attrib.get('name', None)
+ if name:
+ varname = rpmtag_map.get(name, None)
+ if varname:
+ values[varname] = child[0].text
+ return values
+
+
+def register_commands(subparsers):
+ parser_create = subparsers.add_parser('create',
+ help='Create a new recipe',
+ description='Creates a new recipe from a source tree')
+ parser_create.add_argument('source', help='Path or URL to source')
+ parser_create.add_argument('-o', '--outfile', help='Specify filename for recipe to create')
+ parser_create.add_argument('-p', '--provides', help='Specify an alias for the item provided by the recipe')
+ parser_create.add_argument('-m', '--machine', help='Make recipe machine-specific as opposed to architecture-specific', action='store_true')
+ parser_create.add_argument('-x', '--extract-to', metavar='EXTRACTPATH', help='Assuming source is a URL, fetch it and extract it to the directory specified as %(metavar)s')
+ parser_create.add_argument('-N', '--name', help='Name to use within recipe (PN)')
+ parser_create.add_argument('-V', '--version', help='Version to use within recipe (PV)')
+ parser_create.add_argument('-b', '--binary', help='Treat the source tree as something that should be installed verbatim (no compilation, same directory structure)', action='store_true')
+ parser_create.add_argument('--also-native', help='Also add native variant (i.e. support building recipe for the build host as well as the target machine)', action='store_true')
+ parser_create.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
+ group = parser_create.add_mutually_exclusive_group()
+ group.add_argument('-a', '--autorev', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
+ group.add_argument('-S', '--srcrev', help='Source revision to fetch if fetching from an SCM such as git (default latest)')
+ parser_create.add_argument('-B', '--srcbranch', help='Branch in source repository if fetching from an SCM such as git (default master)')
+ parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_create.add_argument('--fetch-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
+ parser_create.add_argument('--mirrors', action="store_true", help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).')
+ parser_create.set_defaults(func=create_recipe)
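+ # Typical invocation (hypothetical URL):
+ # recipetool create -o myapp_1.0.bb https://example.com/myapp-1.0.tar.gz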
+
diff --git a/poky/scripts/lib/recipetool/create_buildsys.py b/poky/scripts/lib/recipetool/create_buildsys.py
new file mode 100644
index 000000000..4743c740c
--- /dev/null
+++ b/poky/scripts/lib/recipetool/create_buildsys.py
@@ -0,0 +1,893 @@
+# Recipe creation tool - create command build system handlers
+#
+# Copyright (C) 2014-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+import logging
+import glob
+from recipetool.create import RecipeHandler, validate_pv
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+plugins = None
+
+def plugin_init(pluginlist):
+ # Take a reference to the list so we can use it later
+ global plugins
+ plugins = pluginlist
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class CmakeRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if RecipeHandler.checkfiles(srctree, ['CMakeLists.txt']):
+ classes.append('cmake')
+ values = CmakeRecipeHandler.extract_cmake_deps(lines_before, srctree, extravalues)
+ classes.extend(values.pop('inherit', '').split())
+ for var, value in values.items():
+ lines_before.append('%s = "%s"' % (var, value))
+ lines_after.append('# Specify any options you want to pass to cmake using EXTRA_OECMAKE:')
+ lines_after.append('EXTRA_OECMAKE = ""')
+ lines_after.append('')
+ handled.append('buildsystem')
+ return True
+ return False
+
+ @staticmethod
+ def extract_cmake_deps(outlines, srctree, extravalues, cmakelistsfile=None):
+ # Find all plugins that want to register handlers
+ logger.debug('Loading cmake handlers')
+ handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_cmake_handlers'):
+ plugin.register_cmake_handlers(handlers)
+
+ values = {}
+ inherits = []
+
+ if cmakelistsfile:
+ srcfiles = [cmakelistsfile]
+ else:
+ srcfiles = RecipeHandler.checkfiles(srctree, ['CMakeLists.txt'])
+
+ # Note that some of these are non-standard, but probably better to
+ # be able to map them anyway if we see them
+ cmake_pkgmap = {'alsa': 'alsa-lib',
+ 'aspell': 'aspell',
+ 'atk': 'atk',
+ 'bison': 'bison-native',
+ 'boost': 'boost',
+ 'bzip2': 'bzip2',
+ 'cairo': 'cairo',
+ 'cups': 'cups',
+ 'curl': 'curl',
+ 'curses': 'ncurses',
+ 'cvs': 'cvs',
+ 'drm': 'libdrm',
+ 'dbus': 'dbus',
+ 'dbusglib': 'dbus-glib',
+ 'egl': 'virtual/egl',
+ 'expat': 'expat',
+ 'flex': 'flex-native',
+ 'fontconfig': 'fontconfig',
+ 'freetype': 'freetype',
+ 'gettext': '',
+ 'git': '',
+ 'gio': 'glib-2.0',
+ 'giounix': 'glib-2.0',
+ 'glew': 'glew',
+ 'glib': 'glib-2.0',
+ 'glib2': 'glib-2.0',
+ 'glu': 'libglu',
+ 'glut': 'freeglut',
+ 'gobject': 'glib-2.0',
+ 'gperf': 'gperf-native',
+ 'gnutls': 'gnutls',
+ 'gtk2': 'gtk+',
+ 'gtk3': 'gtk+3',
+ 'gtk': 'gtk+3',
+ 'harfbuzz': 'harfbuzz',
+ 'icu': 'icu',
+ 'intl': 'virtual/libintl',
+ 'jpeg': 'jpeg',
+ 'libarchive': 'libarchive',
+ 'libiconv': 'virtual/libiconv',
+ 'liblzma': 'xz',
+ 'libxml2': 'libxml2',
+ 'libxslt': 'libxslt',
+ 'opengl': 'virtual/libgl',
+ 'openmp': '',
+ 'openssl': 'openssl',
+ 'pango': 'pango',
+ 'perl': '',
+ 'perllibs': '',
+ 'pkgconfig': '',
+ 'png': 'libpng',
+ 'pthread': '',
+ 'pythoninterp': '',
+ 'pythonlibs': '',
+ 'ruby': 'ruby-native',
+ 'sdl': 'libsdl',
+ 'sdl2': 'libsdl2',
+ 'subversion': 'subversion-native',
+ 'swig': 'swig-native',
+ 'tcl': 'tcl-native',
+ 'threads': '',
+ 'tiff': 'tiff',
+ 'wget': 'wget',
+ 'x11': 'libx11',
+ 'xcb': 'libxcb',
+ 'xext': 'libxext',
+ 'xfixes': 'libxfixes',
+ 'zlib': 'zlib',
+ }
+
+ pcdeps = []
+ libdeps = []
+ deps = []
+ unmappedpkgs = []
+
+ proj_re = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE)
+ pkgcm_re = re.compile(r'pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
+ pkgsm_re = re.compile(r'pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
+ findpackage_re = re.compile(r'find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
+ findlibrary_re = re.compile(r'find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
+ checklib_re = re.compile(r'check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
+ include_re = re.compile(r'include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ subdir_re = re.compile(r'add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ dep_re = re.compile(r'([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
+
+ def find_cmake_package(pkg):
+ RecipeHandler.load_devel_filemap(tinfoil.config_data)
+ for fn, pn in RecipeHandler.recipecmakefilemap.items():
+ splitname = fn.split('/')
+ if len(splitname) > 1:
+ if splitname[0].lower().startswith(pkg.lower()):
+ if splitname[1] == '%s-config.cmake' % pkg.lower() or splitname[1] == '%sConfig.cmake' % pkg or splitname[1] == 'Find%s.cmake' % pkg:
+ return pn
+ return None
+
+ def interpret_value(value):
+ return value.strip('"')
+
+ def parse_cmake_file(fn, paths=None):
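+ # Parse a CMake file line by line, recursing into include()s and
+ # add_subdirectory() trees, collecting dependency info via the regexes above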
+ searchpaths = (paths or []) + [os.path.dirname(fn)]
+ logger.debug('Parsing file %s' % fn)
+ with open(fn, 'r', errors='surrogateescape') as f:
+ for line in f:
+ line = line.strip()
+ handled_line = False
+ for handler in handlers:
+ if handler.process_line(srctree, fn, line, libdeps, pcdeps, deps, outlines, inherits, values):
+ # A handler has fully dealt with this line; move on to the next line
+ handled_line = True
+ break
+ if handled_line:
+ continue
+ res = include_re.match(line)
+ if res:
+ includefn = bb.utils.which(':'.join(searchpaths), res.group(1))
+ if includefn:
+ parse_cmake_file(includefn, searchpaths)
+ else:
+ logger.debug('Unable to recurse into include file %s' % res.group(1))
+ continue
+ res = subdir_re.match(line)
+ if res:
+ subdirfn = os.path.join(os.path.dirname(fn), res.group(1), 'CMakeLists.txt')
+ if os.path.exists(subdirfn):
+ parse_cmake_file(subdirfn, searchpaths)
+ else:
+ logger.debug('Unable to recurse into subdirectory file %s' % subdirfn)
+ continue
+ res = proj_re.match(line)
+ if res:
+ extravalues['PN'] = interpret_value(res.group(1).split()[0])
+ continue
+ res = pkgcm_re.match(line)
+ if res:
+ res = dep_re.findall(res.group(2))
+ if res:
+ pcdeps.extend([interpret_value(x[0]) for x in res])
+ inherits.append('pkgconfig')
+ continue
+ res = pkgsm_re.match(line)
+ if res:
+ res = dep_re.findall(res.group(2))
+ if res:
+ # Note: appending a tuple here!
+ item = tuple((interpret_value(x[0]) for x in res))
+ if len(item) == 1:
+ item = item[0]
+ pcdeps.append(item)
+ inherits.append('pkgconfig')
+ continue
+ res = findpackage_re.match(line)
+ if res:
+ origpkg = res.group(1)
+ pkg = interpret_value(origpkg)
+ found = False
+ for handler in handlers:
+ if handler.process_findpackage(srctree, fn, pkg, deps, outlines, inherits, values):
+ logger.debug('Mapped CMake package %s via handler %s' % (pkg, handler.__class__.__name__))
+ found = True
+ break
+ if found:
+ continue
+ elif pkg == 'Gettext':
+ inherits.append('gettext')
+ elif pkg == 'Perl':
+ inherits.append('perlnative')
+ elif pkg == 'PkgConfig':
+ inherits.append('pkgconfig')
+ elif pkg == 'PythonInterp':
+ inherits.append('pythonnative')
+ elif pkg == 'PythonLibs':
+ inherits.append('python-dir')
+ else:
+ # Try to map via looking at installed CMake packages in pkgdata
+ dep = find_cmake_package(pkg)
+ if dep:
+ logger.debug('Mapped CMake package %s to recipe %s via pkgdata' % (pkg, dep))
+ deps.append(dep)
+ else:
+ dep = cmake_pkgmap.get(pkg.lower(), None)
+ if dep:
+ logger.debug('Mapped CMake package %s to recipe %s via internal list' % (pkg, dep))
+ deps.append(dep)
+ elif dep is None:
+ unmappedpkgs.append(origpkg)
+ continue
+ res = checklib_re.match(line)
+ if res:
+ lib = interpret_value(res.group(1))
+ if not lib.startswith('$'):
+ libdeps.append(lib)
+ res = findlibrary_re.match(line)
+ if res:
+ libs = res.group(2).split()
+ for lib in libs:
+ if lib in ['HINTS', 'PATHS', 'PATH_SUFFIXES', 'DOC', 'NAMES_PER_DIR'] or lib.startswith(('NO_', 'CMAKE_', 'ONLY_CMAKE_')):
+ break
+ lib = interpret_value(lib)
+ if not lib.startswith('$'):
+ libdeps.append(lib)
+ if line.lower().startswith('useswig'):
+ deps.append('swig-native')
+ continue
+
+ parse_cmake_file(srcfiles[0])
+
+ if unmappedpkgs:
+ outlines.append('# NOTE: unable to map the following CMake package dependencies: %s' % ' '.join(list(set(unmappedpkgs))))
+
+ RecipeHandler.handle_depends(libdeps, pcdeps, deps, outlines, values, tinfoil.config_data)
+
+ for handler in handlers:
+ handler.post_process(srctree, libdeps, pcdeps, deps, outlines, inherits, values)
+
+ if inherits:
+ values['inherit'] = ' '.join(list(set(inherits)))
+
+ return values
+
+
+class CmakeExtensionHandler(object):
+ '''Base class for CMake extension handlers'''
+ def process_line(self, srctree, fn, line, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+ Handle a line parsed out of a CMake file.
+ Return True if you've completely handled the passed in line, otherwise return False.
+ '''
+ return False
+
+ def process_findpackage(self, srctree, fn, pkg, deps, outlines, inherits, values):
+ '''
+ Handle a find_package package parsed out of a CMake file.
+ Return True if you've completely handled the passed in package, otherwise return False.
+ '''
+ return False
+
+ def post_process(self, srctree, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+ Apply any desired post-processing on the output
+ '''
+ return
+
+
+
+class SconsRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if RecipeHandler.checkfiles(srctree, ['SConstruct', 'Sconstruct', 'sconstruct']):
+ classes.append('scons')
+ lines_after.append('# Specify any options you want to pass to scons using EXTRA_OESCONS:')
+ lines_after.append('EXTRA_OESCONS = ""')
+ lines_after.append('')
+ handled.append('buildsystem')
+ return True
+ return False
+
+
+class QmakeRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if RecipeHandler.checkfiles(srctree, ['*.pro']):
+ classes.append('qmake2')
+ handled.append('buildsystem')
+ return True
+ return False
+
+
+class AutotoolsRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ autoconf = False
+ if RecipeHandler.checkfiles(srctree, ['configure.ac', 'configure.in']):
+ autoconf = True
+ values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, extravalues)
+ classes.extend(values.pop('inherit', '').split())
+ for var, value in values.items():
+ lines_before.append('%s = "%s"' % (var, value))
+ else:
+ conffile = RecipeHandler.checkfiles(srctree, ['configure'])
+ if conffile:
+ # Check if this is just a pre-generated autoconf configure script
+ with open(conffile[0], 'r', errors='surrogateescape') as f:
+ for i in range(1, 10):
+ if 'Generated by GNU Autoconf' in f.readline():
+ autoconf = True
+ break
+
+ if autoconf and not ('PV' in extravalues and 'PN' in extravalues):
+ # Last resort
+ conffile = RecipeHandler.checkfiles(srctree, ['configure'])
+ if conffile:
+ with open(conffile[0], 'r', errors='surrogateescape') as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith('VERSION=') or line.startswith('PACKAGE_VERSION='):
+ pv = line.split('=')[1].strip('"\'')
+ if pv and 'PV' not in extravalues and validate_pv(pv):
+ extravalues['PV'] = pv
+ elif line.startswith('PACKAGE_NAME=') or line.startswith('PACKAGE='):
+ pn = line.split('=')[1].strip('"\'')
+ if pn and 'PN' not in extravalues:
+ extravalues['PN'] = pn
+
+ if autoconf:
+ lines_before.append('')
+ lines_before.append('# NOTE: if this software is not capable of being built in a separate build directory')
+ lines_before.append('# from the source, you should replace autotools with autotools-brokensep in the')
+ lines_before.append('# inherit line')
+ classes.append('autotools')
+ lines_after.append('# Specify any options you want to pass to the configure script using EXTRA_OECONF:')
+ lines_after.append('EXTRA_OECONF = ""')
+ lines_after.append('')
+ handled.append('buildsystem')
+ return True
+
+ return False
+
+ @staticmethod
+ def extract_autotools_deps(outlines, srctree, extravalues=None, acfile=None):
+ import shlex
+
+ # Find all plugins that want to register handlers
+ logger.debug('Loading autotools handlers')
+ handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_autotools_handlers'):
+ plugin.register_autotools_handlers(handlers)
+
+ values = {}
+ inherits = []
+
+ # Hardcoded map, we also use a dynamic one based on what's in the sysroot
+ progmap = {'flex': 'flex-native',
+ 'bison': 'bison-native',
+ 'm4': 'm4-native',
+ 'tar': 'tar-native',
+ 'ar': 'binutils-native',
+ 'ranlib': 'binutils-native',
+ 'ld': 'binutils-native',
+ 'strip': 'binutils-native',
+ 'libtool': '',
+ 'autoconf': '',
+ 'autoheader': '',
+ 'automake': '',
+ 'uname': '',
+ 'rm': '',
+ 'cp': '',
+ 'mv': '',
+ 'find': '',
+ 'awk': '',
+ 'sed': '',
+ }
+ progclassmap = {'gconftool-2': 'gconf',
+ 'pkg-config': 'pkgconfig',
+ 'python': 'pythonnative',
+ 'python3': 'python3native',
+ 'perl': 'perlnative',
+ 'makeinfo': 'texinfo',
+ }
+
+ pkg_re = re.compile(r'PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ pkgce_re = re.compile(r'PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
+ lib_re = re.compile(r'AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
+ libx_re = re.compile(r'AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
+ progs_re = re.compile(r'_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ dep_re = re.compile(r'([^ ><=]+)( [<>=]+ [^ ><=]+)?')
+ ac_init_re = re.compile(r'AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
+ am_init_re = re.compile(r'AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
+ define_re = re.compile(r'\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
+ version_re = re.compile(r'([0-9.]+)')
+
+ defines = {}
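+ # subst_defines recursively expands names recorded from m4 define()
+ # statements so that composed values (e.g. version strings) resolve fully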
+ def subst_defines(value):
+ newvalue = value
+ for define, defval in defines.items():
+ newvalue = newvalue.replace(define, defval)
+ if newvalue != value:
+ return subst_defines(newvalue)
+ return value
+
+ def process_value(value):
+ value = value.replace('[', '').replace(']', '')
+ if value.startswith('m4_esyscmd(') or value.startswith('m4_esyscmd_s('):
+ cmd = subst_defines(value[value.index('(')+1:-1])
+ try:
+ if '|' in cmd:
+ cmd = 'set -o pipefail; ' + cmd
+ stdout, _ = bb.process.run(cmd, cwd=srctree, shell=True)
+ ret = stdout.rstrip()
+ except bb.process.ExecutionError as e:
+ ret = ''
+ elif value.startswith('m4_'):
+ return None
+ else:
+ ret = subst_defines(value)
+ if ret:
+ ret = ret.strip('"\'')
+ return ret
+
+ # Since a configure.ac file is essentially a program, parsing it like this is
+ # only ever going to be a heuristic, but it ought to be a good enough approximation
+ if acfile:
+ srcfiles = [acfile]
+ else:
+ srcfiles = RecipeHandler.checkfiles(srctree, ['acinclude.m4', 'configure.ac', 'configure.in'])
+
+ pcdeps = []
+ libdeps = []
+ deps = []
+ unmapped = []
+
+ RecipeHandler.load_binmap(tinfoil.config_data)
+
+ def process_macro(keyword, value):
+ for handler in handlers:
+ if handler.process_macro(srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
+ return
+ logger.debug('Found keyword %s with value "%s"' % (keyword, value))
+ if keyword == 'PKG_CHECK_MODULES':
+ res = pkg_re.search(value)
+ if res:
+ res = dep_re.findall(res.group(1))
+ if res:
+ pcdeps.extend([x[0] for x in res])
+ inherits.append('pkgconfig')
+ elif keyword == 'PKG_CHECK_EXISTS':
+ res = pkgce_re.search(value)
+ if res:
+ res = dep_re.findall(res.group(1))
+ if res:
+ pcdeps.extend([x[0] for x in res])
+ inherits.append('pkgconfig')
+ elif keyword in ('AM_GNU_GETTEXT', 'AM_GLIB_GNU_GETTEXT', 'GETTEXT_PACKAGE'):
+ inherits.append('gettext')
+ elif keyword in ('AC_PROG_INTLTOOL', 'IT_PROG_INTLTOOL'):
+ deps.append('intltool-native')
+ elif keyword == 'AM_PATH_GLIB_2_0':
+ deps.append('glib-2.0')
+ elif keyword in ('AC_CHECK_PROG', 'AC_PATH_PROG', 'AX_WITH_PROG'):
+ res = progs_re.search(value)
+ if res:
+ for prog in shlex.split(res.group(1)):
+ prog = prog.split()[0]
+ for handler in handlers:
+ if handler.process_prog(srctree, keyword, value, prog, deps, outlines, inherits, values):
+ return
+ progclass = progclassmap.get(prog, None)
+ if progclass:
+ inherits.append(progclass)
+ else:
+ progdep = RecipeHandler.recipebinmap.get(prog, None)
+ if not progdep:
+ progdep = progmap.get(prog, None)
+ if progdep:
+ deps.append(progdep)
+ elif progdep is None:
+ if not prog.startswith('$'):
+ unmapped.append(prog)
+ elif keyword == 'AC_CHECK_LIB':
+ res = lib_re.search(value)
+ if res:
+ lib = res.group(1)
+ if not lib.startswith('$'):
+ libdeps.append(lib)
+ elif keyword == 'AX_CHECK_LIBRARY':
+ res = libx_re.search(value)
+ if res:
+ lib = res.group(2)
+ if not lib.startswith('$'):
+ header = res.group(1)
+ libdeps.append((lib, header))
+ elif keyword == 'AC_PATH_X':
+ deps.append('libx11')
+ elif keyword in ('AX_BOOST', 'BOOST_REQUIRE'):
+ deps.append('boost')
+ elif keyword in ('AC_PROG_LEX', 'AM_PROG_LEX', 'AX_PROG_FLEX'):
+ deps.append('flex-native')
+ elif keyword in ('AC_PROG_YACC', 'AX_PROG_BISON'):
+ deps.append('bison-native')
+ elif keyword == 'AX_CHECK_ZLIB':
+ deps.append('zlib')
+ elif keyword in ('AX_CHECK_OPENSSL', 'AX_LIB_CRYPTO'):
+ deps.append('openssl')
+ elif keyword == 'AX_LIB_CURL':
+ deps.append('curl')
+ elif keyword == 'AX_LIB_BEECRYPT':
+ deps.append('beecrypt')
+ elif keyword == 'AX_LIB_EXPAT':
+ deps.append('expat')
+ elif keyword == 'AX_LIB_GCRYPT':
+ deps.append('libgcrypt')
+ elif keyword == 'AX_LIB_NETTLE':
+ deps.append('nettle')
+ elif keyword == 'AX_LIB_READLINE':
+ deps.append('readline')
+ elif keyword == 'AX_LIB_SQLITE3':
+ deps.append('sqlite3')
+ elif keyword == 'AX_LIB_TAGLIB':
+ deps.append('taglib')
+ elif keyword in ['AX_PKG_SWIG', 'AC_PROG_SWIG']:
+ deps.append('swig-native')
+ elif keyword == 'AX_PROG_XSLTPROC':
+ deps.append('libxslt-native')
+ elif keyword in ['AC_PYTHON_DEVEL', 'AX_PYTHON_DEVEL', 'AM_PATH_PYTHON']:
+ pythonclass = 'pythonnative'
+ res = version_re.search(value)
+ if res:
+ if res.group(1).startswith('3'):
+ pythonclass = 'python3native'
+ # Avoid replacing python3native with pythonnative
+ if pythonclass not in inherits and 'python3native' not in inherits:
+ if 'pythonnative' in inherits:
+ inherits.remove('pythonnative')
+ inherits.append(pythonclass)
+ elif keyword == 'AX_WITH_CURSES':
+ deps.append('ncurses')
+ elif keyword == 'AX_PATH_BDB':
+ deps.append('db')
+ elif keyword == 'AX_PATH_LIB_PCRE':
+ deps.append('libpcre')
+ elif keyword == 'AC_INIT':
+ if extravalues is not None:
+ res = ac_init_re.match(value)
+ if res:
+ extravalues['PN'] = process_value(res.group(1))
+ pv = process_value(res.group(2))
+ if validate_pv(pv):
+ extravalues['PV'] = pv
+ elif keyword == 'AM_INIT_AUTOMAKE':
+ if extravalues is not None:
+ if 'PN' not in extravalues:
+ res = am_init_re.match(value)
+ if res:
+ if res.group(1) != 'AC_PACKAGE_NAME':
+ extravalues['PN'] = process_value(res.group(1))
+ pv = process_value(res.group(2))
+ if validate_pv(pv):
+ extravalues['PV'] = pv
+ elif keyword == 'define(':
+ res = define_re.match(value)
+ if res:
+ key = res.group(2).strip('[]')
+ value = process_value(res.group(3))
+ if value is not None:
+ defines[key] = value
+
+ keywords = ['PKG_CHECK_MODULES',
+ 'PKG_CHECK_EXISTS',
+ 'AM_GNU_GETTEXT',
+ 'AM_GLIB_GNU_GETTEXT',
+ 'GETTEXT_PACKAGE',
+ 'AC_PROG_INTLTOOL',
+ 'IT_PROG_INTLTOOL',
+ 'AM_PATH_GLIB_2_0',
+ 'AC_CHECK_PROG',
+ 'AC_PATH_PROG',
+ 'AX_WITH_PROG',
+ 'AC_CHECK_LIB',
+ 'AX_CHECK_LIBRARY',
+ 'AC_PATH_X',
+ 'AX_BOOST',
+ 'BOOST_REQUIRE',
+ 'AC_PROG_LEX',
+ 'AM_PROG_LEX',
+ 'AX_PROG_FLEX',
+ 'AC_PROG_YACC',
+ 'AX_PROG_BISON',
+ 'AX_CHECK_ZLIB',
+ 'AX_CHECK_OPENSSL',
+ 'AX_LIB_CRYPTO',
+ 'AX_LIB_CURL',
+ 'AX_LIB_BEECRYPT',
+ 'AX_LIB_EXPAT',
+ 'AX_LIB_GCRYPT',
+ 'AX_LIB_NETTLE',
+ 'AX_LIB_READLINE',
+ 'AX_LIB_SQLITE3',
+ 'AX_LIB_TAGLIB',
+ 'AX_PKG_SWIG',
+ 'AC_PROG_SWIG',
+ 'AX_PROG_XSLTPROC',
+ 'AC_PYTHON_DEVEL',
+ 'AX_PYTHON_DEVEL',
+ 'AM_PATH_PYTHON',
+ 'AX_WITH_CURSES',
+ 'AX_PATH_BDB',
+ 'AX_PATH_LIB_PCRE',
+ 'AC_INIT',
+ 'AM_INIT_AUTOMAKE',
+ 'define(',
+ ]
+
+ for handler in handlers:
+ handler.extend_keywords(keywords)
+
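+ # Walk each autoconf input file accumulating macro invocations; calls that
+ # span multiple lines are stitched together by tracking parenthesis nesting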
+ for srcfile in srcfiles:
+ nesting = 0
+ in_keyword = ''
+ partial = ''
+ with open(srcfile, 'r', errors='surrogateescape') as f:
+ for line in f:
+ if in_keyword:
+ partial += ' ' + line.strip()
+ if partial.endswith('\\'):
+ partial = partial[:-1]
+ nesting = nesting + line.count('(') - line.count(')')
+ if nesting == 0:
+ process_macro(in_keyword, partial)
+ partial = ''
+ in_keyword = ''
+ else:
+ for keyword in keywords:
+ if keyword in line:
+ nesting = line.count('(') - line.count(')')
+ if nesting > 0:
+ partial = line.strip()
+ if partial.endswith('\\'):
+ partial = partial[:-1]
+ in_keyword = keyword
+ else:
+ process_macro(keyword, line.strip())
+ break
+
+ if in_keyword:
+ process_macro(in_keyword, partial)
+
+ if extravalues:
+ for k,v in list(extravalues.items()):
+ if v:
+ if v.startswith('$') or v.startswith('@') or v.startswith('%'):
+ del extravalues[k]
+ else:
+ extravalues[k] = v.strip('"\'').rstrip('()')
+
+ if unmapped:
+ outlines.append('# NOTE: the following prog dependencies are unknown, ignoring: %s' % ' '.join(list(set(unmapped))))
+
+ RecipeHandler.handle_depends(libdeps, pcdeps, deps, outlines, values, tinfoil.config_data)
+
+ for handler in handlers:
+ handler.post_process(srctree, libdeps, pcdeps, deps, outlines, inherits, values)
+
+ if inherits:
+ values['inherit'] = ' '.join(list(set(inherits)))
+
+ return values
+
+
+class AutotoolsExtensionHandler(object):
+ '''Base class for Autotools extension handlers'''
+ def process_macro(self, srctree, keyword, value, process_value, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+ Handle a macro parsed out of an autotools file. Note that if you want this to be called
+ for any macro other than the ones AutotoolsRecipeHandler already looks for, you'll need
+ to add it to the keywords list in extend_keywords().
+ Return True if you've completely handled the passed in macro, otherwise return False.
+ '''
+ return False
+
+ def extend_keywords(self, keywords):
+ '''Adds keywords to be recognised by the parser (so that you get a call to process_macro)'''
+ return
+
+ def process_prog(self, srctree, keyword, value, prog, deps, outlines, inherits, values):
+ '''
+ Handle an AC_PATH_PROG, AC_CHECK_PROG etc. line
+ Return True if you've completely handled the passed in macro, otherwise return False.
+ '''
+ return False
+
+ def post_process(self, srctree, libdeps, pcdeps, deps, outlines, inherits, values):
+ '''
+ Apply any desired post-processing on the output
+ '''
+ return
+
+
+class MakefileRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ makefile = RecipeHandler.checkfiles(srctree, ['Makefile', 'makefile', 'GNUmakefile'])
+ if makefile:
+ lines_after.append('# NOTE: this is a Makefile-only piece of software, so we cannot generate much of the')
+ lines_after.append('# recipe automatically - you will need to examine the Makefile yourself and ensure')
+ lines_after.append('# that the appropriate arguments are passed in.')
+ lines_after.append('')
+
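+ # Try running autoscan to generate a configure.scan that we can mine for
+ # likely dependencies, even though the software itself is not autotools-based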
+ scanfile = os.path.join(srctree, 'configure.scan')
+ skipscan = False
+ try:
+ stdout, stderr = bb.process.run('autoscan', cwd=srctree, shell=True)
+ except bb.process.ExecutionError as e:
+ skipscan = True
+ if not skipscan and os.path.exists(scanfile):
+ values = AutotoolsRecipeHandler.extract_autotools_deps(lines_before, srctree, acfile=scanfile)
+ classes.extend(values.pop('inherit', '').split())
+ for var, value in values.items():
+ if var == 'DEPENDS':
+ lines_before.append('# NOTE: some of these dependencies may be optional, check the Makefile and/or upstream documentation')
+ lines_before.append('%s = "%s"' % (var, value))
+ lines_before.append('')
+ for f in ['configure.scan', 'autoscan.log']:
+ fp = os.path.join(srctree, f)
+ if os.path.exists(fp):
+ os.remove(fp)
+
+ self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])
+
+ func = []
+ func.append('# You will almost certainly need to add additional arguments here')
+ func.append('oe_runmake')
+ self.genfunction(lines_after, 'do_compile', func)
+
+ installtarget = True
+ try:
+ stdout, stderr = bb.process.run('make -n install', cwd=srctree, shell=True)
+ except bb.process.ExecutionError as e:
+ if e.exitcode != 1:
+ installtarget = False
+ func = []
+ if installtarget:
+ func.append('# This is a guess; additional arguments may be required')
+ makeargs = ''
+ with open(makefile[0], 'r', errors='surrogateescape') as f:
+ for i in range(1, 100):
+ if 'DESTDIR' in f.readline():
+ makeargs += " 'DESTDIR=${D}'"
+ break
+ func.append('oe_runmake install%s' % makeargs)
+ else:
+ func.append('# NOTE: unable to determine what to put here - there is a Makefile but no')
+ func.append('# target named "install", so you will need to define this yourself')
+ self.genfunction(lines_after, 'do_install', func)
+
+ handled.append('buildsystem')
+ else:
+ lines_after.append('# NOTE: no Makefile found, unable to determine what needs to be done')
+ lines_after.append('')
+ self.genfunction(lines_after, 'do_configure', ['# Specify any needed configure commands here'])
+ self.genfunction(lines_after, 'do_compile', ['# Specify compilation commands here'])
+ self.genfunction(lines_after, 'do_install', ['# Specify install commands here'])
+
+
+class VersionFileRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'PV' not in extravalues:
+ # Look for a VERSION or version file containing a single line consisting
+ # only of a version number
+ filelist = RecipeHandler.checkfiles(srctree, ['VERSION', 'version'])
+ version = None
+ for fileitem in filelist:
+ linecount = 0
+ with open(fileitem, 'r', errors='surrogateescape') as f:
+ for line in f:
+ line = line.rstrip().strip('"\'')
+ linecount += 1
+ if line:
+ if linecount > 1:
+ version = None
+ break
+ else:
+ if validate_pv(line):
+ version = line
+ if version:
+ extravalues['PV'] = version
+ break
+
+
+class SpecFileRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'PV' in extravalues and 'PN' in extravalues:
+ return
+ filelist = RecipeHandler.checkfiles(srctree, ['*.spec'], recursive=True)
+ valuemap = {'Name': 'PN',
+ 'Version': 'PV',
+ 'Summary': 'SUMMARY',
+ 'Url': 'HOMEPAGE',
+ 'License': 'LICENSE'}
+ foundvalues = {}
+ for fileitem in filelist:
+ linecount = 0
+ with open(fileitem, 'r', errors='surrogateescape') as f:
+ for line in f:
+ for fieldname, varname in valuemap.items():
+ if line.startswith(fieldname + ':') and varname not in foundvalues:
+ foundvalues[varname] = line.split(':', 1)[1].strip()
+ break
+ if len(foundvalues) == len(valuemap):
+ break
+ # Drop values containing unexpanded RPM macros
+ for k in list(foundvalues.keys()):
+ if '%' in foundvalues[k]:
+ del foundvalues[k]
+ if 'PV' in foundvalues:
+ if not validate_pv(foundvalues['PV']):
+ del foundvalues['PV']
+ license = foundvalues.pop('LICENSE', None)
+ if license:
+ liccomment = '# NOTE: spec file indicates the license may be "%s"' % license
+ for i, line in enumerate(lines_before):
+ if line.startswith('LICENSE ='):
+ lines_before.insert(i, liccomment)
+ break
+ else:
+ lines_before.append(liccomment)
+ extravalues.update(foundvalues)
+
+def register_recipe_handlers(handlers):
+ # Set priorities with some gaps so that other plugins can insert
+ # their own handlers (so avoid changing these numbers)
+ handlers.append((CmakeRecipeHandler(), 50))
+ handlers.append((AutotoolsRecipeHandler(), 40))
+ handlers.append((SconsRecipeHandler(), 30))
+ handlers.append((QmakeRecipeHandler(), 20))
+ handlers.append((MakefileRecipeHandler(), 10))
+ handlers.append((VersionFileRecipeHandler(), -1))
+ handlers.append((SpecFileRecipeHandler(), -1))
diff --git a/poky/scripts/lib/recipetool/create_buildsys_python.py b/poky/scripts/lib/recipetool/create_buildsys_python.py
new file mode 100644
index 000000000..5bd2aa337
--- /dev/null
+++ b/poky/scripts/lib/recipetool/create_buildsys_python.py
@@ -0,0 +1,719 @@
+# Recipe creation tool - create build system handler for python
+#
+# Copyright (C) 2015 Mentor Graphics Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import ast
+import codecs
+import collections
+import distutils.command.build_py
+import email
+import imp
+import glob
+import itertools
+import logging
+import os
+import re
+import sys
+import subprocess
+from recipetool.create import RecipeHandler
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class PythonRecipeHandler(RecipeHandler):
+ base_pkgdeps = ['python-core']
+ excluded_pkgdeps = ['python-dbg']
+ # os.path is provided by python-core
+ assume_provided = ['builtins', 'os.path']
+ # Assumes that the host python builtin_module_names is sane for target too
+ assume_provided = assume_provided + list(sys.builtin_module_names)
+
+ bbvar_map = {
+ 'Name': 'PN',
+ 'Version': 'PV',
+ 'Home-page': 'HOMEPAGE',
+ 'Summary': 'SUMMARY',
+ 'Description': 'DESCRIPTION',
+ 'License': 'LICENSE',
+ 'Requires': 'RDEPENDS_${PN}',
+ 'Provides': 'RPROVIDES_${PN}',
+ 'Obsoletes': 'RREPLACES_${PN}',
+ }
+ # PN/PV are already set by recipetool core & desc can be extremely long
+ excluded_fields = [
+ 'Description',
+ ]
+ setup_parse_map = {
+ 'Url': 'Home-page',
+ 'Classifiers': 'Classifier',
+ 'Description': 'Summary',
+ }
+ setuparg_map = {
+ 'Home-page': 'url',
+ 'Classifier': 'classifiers',
+ 'Summary': 'description',
+ 'Description': 'long-description',
+ }
+ # Values which are lists, used by the setup.py argument based metadata
+ # extraction method, to determine how to process the setup.py output.
+ setuparg_list_fields = [
+ 'Classifier',
+ 'Requires',
+ 'Provides',
+ 'Obsoletes',
+ 'Platform',
+ 'Supported-Platform',
+ ]
+ setuparg_multi_line_values = ['Description']
+ replacements = [
+ ('License', r' +$', ''),
+ ('License', r'^ +', ''),
+ ('License', r' ', '-'),
+ ('License', r'^GNU-', ''),
+ ('License', r'-[Ll]icen[cs]e(,?-[Vv]ersion)?', ''),
+ ('License', r'^UNKNOWN$', ''),
+
+ # Remove currently unhandled version numbers from these variables
+ ('Requires', r' *\([^)]*\)', ''),
+ ('Provides', r' *\([^)]*\)', ''),
+ ('Obsoletes', r' *\([^)]*\)', ''),
+ ('Install-requires', r'^([^><= ]+).*', r'\1'),
+ ('Extras-require', r'^([^><= ]+).*', r'\1'),
+ ('Tests-require', r'^([^><= ]+).*', r'\1'),
+
+ # Remove unhandled dependency on particular features (e.g. foo[PDF])
+ ('Install-requires', r'\[[^\]]+\]$', ''),
+ ]
+
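+ # Map Trove classifiers to license names; deliberately generic entries such
+ # as 'GPL' are narrowed down later using the declared License field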
+ classifier_license_map = {
+ 'License :: OSI Approved :: Academic Free License (AFL)': 'AFL',
+ 'License :: OSI Approved :: Apache Software License': 'Apache',
+ 'License :: OSI Approved :: Apple Public Source License': 'APSL',
+ 'License :: OSI Approved :: Artistic License': 'Artistic',
+ 'License :: OSI Approved :: Attribution Assurance License': 'AAL',
+ 'License :: OSI Approved :: BSD License': 'BSD',
+ 'License :: OSI Approved :: Common Public License': 'CPL',
+ 'License :: OSI Approved :: Eiffel Forum License': 'EFL',
+ 'License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)': 'EUPL-1.0',
+ 'License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)': 'EUPL-1.1',
+ 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0+',
+ 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0',
+ 'License :: OSI Approved :: GNU Free Documentation License (FDL)': 'GFDL',
+ 'License :: OSI Approved :: GNU General Public License (GPL)': 'GPL',
+ 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0',
+ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0+',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0+',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0+',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0+',
+ 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)': 'LGPL',
+ 'License :: OSI Approved :: IBM Public License': 'IPL',
+ 'License :: OSI Approved :: ISC License (ISCL)': 'ISC',
+ 'License :: OSI Approved :: Intel Open Source License': 'Intel',
+ 'License :: OSI Approved :: Jabber Open Source License': 'Jabber',
+ 'License :: OSI Approved :: MIT License': 'MIT',
+ 'License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)': 'CVWL',
+ 'License :: OSI Approved :: Motosoto License': 'Motosoto',
+ 'License :: OSI Approved :: Mozilla Public License 1.0 (MPL)': 'MPL-1.0',
+ 'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)': 'MPL-1.1',
+ 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)': 'MPL-2.0',
+ 'License :: OSI Approved :: Nethack General Public License': 'NGPL',
+ 'License :: OSI Approved :: Nokia Open Source License': 'Nokia',
+ 'License :: OSI Approved :: Open Group Test Suite License': 'OGTSL',
+ 'License :: OSI Approved :: Python License (CNRI Python License)': 'CNRI-Python',
+ 'License :: OSI Approved :: Python Software Foundation License': 'PSF',
+ 'License :: OSI Approved :: Qt Public License (QPL)': 'QPL',
+ 'License :: OSI Approved :: Ricoh Source Code Public License': 'RSCPL',
+ 'License :: OSI Approved :: Sleepycat License': 'Sleepycat',
+ 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': '-- Sun Industry Standards Source License (SISSL)',
+ 'License :: OSI Approved :: Sun Public License': 'SPL',
+ 'License :: OSI Approved :: University of Illinois/NCSA Open Source License': 'NCSA',
+ 'License :: OSI Approved :: Vovida Software License 1.0': 'VSL-1.0',
+ 'License :: OSI Approved :: W3C License': 'W3C',
+ 'License :: OSI Approved :: X.Net License': 'Xnet',
+ 'License :: OSI Approved :: Zope Public License': 'ZPL',
+ 'License :: OSI Approved :: zlib/libpng License': 'Zlib',
+ }
+
+ def __init__(self):
+ pass
+
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ if 'buildsystem' in handled:
+ return False
+
+ if not RecipeHandler.checkfiles(srctree, ['setup.py']):
+ return
+
+ # setup.py is always parsed to get at certain required information, such as
+ # distutils vs setuptools
+ #
+ # If egg info is available, we use it for both its PKG-INFO metadata
+ # and for its requires.txt for install_requires.
+ # If PKG-INFO is available but no egg info is, we use that for metadata in preference to
+ # the parsed setup.py, but use the install_requires info from the
+ # parsed setup.py.
+
+ setupscript = os.path.join(srctree, 'setup.py')
+ try:
+ setup_info, uses_setuptools, setup_non_literals, extensions = self.parse_setup_py(setupscript)
+ except Exception:
+ logger.exception("Failed to parse setup.py")
+ setup_info, uses_setuptools, setup_non_literals, extensions = {}, True, [], []
+
+ egginfo = glob.glob(os.path.join(srctree, '*.egg-info'))
+ if egginfo:
+ info = self.get_pkginfo(os.path.join(egginfo[0], 'PKG-INFO'))
+ requires_txt = os.path.join(egginfo[0], 'requires.txt')
+ if os.path.exists(requires_txt):
+ with codecs.open(requires_txt) as f:
+ inst_req = []
+ extras_req = collections.defaultdict(list)
+ current_feature = None
+ for line in f.readlines():
+ line = line.rstrip()
+ if not line:
+ continue
+
+ if line.startswith('['):
+ current_feature = line[1:-1]
+ elif current_feature:
+ extras_req[current_feature].append(line)
+ else:
+ inst_req.append(line)
+ info['Install-requires'] = inst_req
+ info['Extras-require'] = extras_req
+ elif RecipeHandler.checkfiles(srctree, ['PKG-INFO']):
+ info = self.get_pkginfo(os.path.join(srctree, 'PKG-INFO'))
+
+ if setup_info:
+ if 'Install-requires' in setup_info:
+ info['Install-requires'] = setup_info['Install-requires']
+ if 'Extras-require' in setup_info:
+ info['Extras-require'] = setup_info['Extras-require']
+ else:
+ if setup_info:
+ info = setup_info
+ else:
+ info = self.get_setup_args_info(setupscript)
+
+ # Grab the license value before applying replacements
+ license_str = info.get('License', '').strip()
+
+ self.apply_info_replacements(info)
+
+ if uses_setuptools:
+ classes.append('setuptools')
+ else:
+ classes.append('distutils')
+
+ if license_str:
+ for i, line in enumerate(lines_before):
+ if line.startswith('LICENSE = '):
+ lines_before.insert(i, '# NOTE: License in setup.py/PKGINFO is: %s' % license_str)
+ break
+
+ if 'Classifier' in info:
+ existing_licenses = info.get('License', '')
+ licenses = []
+ for classifier in info['Classifier']:
+ if classifier in self.classifier_license_map:
+ license = self.classifier_license_map[classifier]
+ if license == 'Apache' and 'Apache-2.0' in existing_licenses:
+ license = 'Apache-2.0'
+ elif license == 'GPL':
+ if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
+ license = 'GPL-2.0'
+ elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
+ license = 'GPL-3.0'
+ elif license == 'LGPL':
+ if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
+ license = 'LGPL-2.1'
+ elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
+ license = 'LGPL-2.0'
+ elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
+ license = 'LGPL-3.0'
+ licenses.append(license)
+
+ if licenses:
+ info['License'] = ' & '.join(licenses)
+
+ # Map PKG-INFO & setup.py fields to bitbake variables
+ for field, values in info.items():
+ if field in self.excluded_fields:
+ continue
+
+ if field not in self.bbvar_map:
+ continue
+
+ if isinstance(values, str):
+ value = values
+ else:
+ value = ' '.join(str(v) for v in values if v)
+
+ bbvar = self.bbvar_map[field]
+ if bbvar not in extravalues and value:
+ extravalues[bbvar] = value
+
+ mapped_deps, unmapped_deps = self.scan_setup_python_deps(srctree, setup_info, setup_non_literals)
+
+ extras_req = set()
+ if 'Extras-require' in info:
+ extras_req = info['Extras-require']
+ if extras_req:
+ lines_after.append('# The following configs & dependencies are from setuptools extras_require.')
+ lines_after.append('# These dependencies are optional, hence can be controlled via PACKAGECONFIG.')
+ lines_after.append('# The upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('#')
+ lines_after.append('# Uncomment this line to enable all the optional features.')
+ lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req)))
+ for feature, feature_reqs in extras_req.items():
+ unmapped_deps.difference_update(feature_reqs)
+
+ feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
+ lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
+
+ inst_reqs = set()
+ if 'Install-requires' in info:
+ if extras_req:
+ lines_after.append('')
+ inst_reqs = info['Install-requires']
+ if inst_reqs:
+ unmapped_deps.difference_update(inst_reqs)
+
+ inst_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
+ lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
+ lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
+
+ if mapped_deps:
+ name = info.get('Name')
+ # 'Name' is usually a plain string at this point, so wrap it to
+ # avoid accidentally testing just its first character
+ if isinstance(name, str):
+ name = [name]
+ if name and name[0] in mapped_deps:
+ # Attempt to avoid self-reference
+ mapped_deps.remove(name[0])
+ mapped_deps -= set(self.excluded_pkgdeps)
+ if inst_reqs or extras_req:
+ lines_after.append('')
+ lines_after.append('# WARNING: the following rdepends are determined through basic analysis of the')
+ lines_after.append('# python sources, and might not be 100% accurate.')
+ lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
+
+ unmapped_deps -= set(extensions)
+ unmapped_deps -= set(self.assume_provided)
+ if unmapped_deps:
+ if mapped_deps:
+ lines_after.append('')
+ lines_after.append('# WARNING: We were unable to map the following python package/module')
+ lines_after.append('# dependencies to the bitbake packages which include them:')
+ lines_after.extend('# {}'.format(d) for d in sorted(unmapped_deps))
+
+ handled.append('buildsystem')
+
+ def get_pkginfo(self, pkginfo_fn):
+ with open(pkginfo_fn, 'r') as f:
+ msg = email.message_from_file(f)
+ msginfo = {}
+ for field in msg.keys():
+ values = msg.get_all(field)
+ if len(values) == 1:
+ msginfo[field] = values[0]
+ else:
+ msginfo[field] = values
+ return msginfo
+
+ def parse_setup_py(self, setupscript='./setup.py'):
+ with codecs.open(setupscript) as f:
+ info, imported_modules, non_literals, extensions = gather_setup_info(f)
+
+ def _map(key):
+ key = key.replace('_', '-')
+ key = key[0].upper() + key[1:]
+ if key in self.setup_parse_map:
+ key = self.setup_parse_map[key]
+ return key
+
+ # Naive mapping of setup() arguments to PKG-INFO field names
+ for d in [info, non_literals]:
+ for key, value in list(d.items()):
+ if key is None:
+ continue
+ new_key = _map(key)
+ if new_key != key:
+ del d[key]
+ d[new_key] = value
+
+ return info, 'setuptools' in imported_modules, non_literals, extensions
+
+ def get_setup_args_info(self, setupscript='./setup.py'):
+ cmd = ['python', setupscript]
+ info = {}
+ keys = set(self.bbvar_map.keys())
+ keys |= set(self.setuparg_list_fields)
+ keys |= set(self.setuparg_multi_line_values)
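+ # Group the fields by whether they are list-valued and/or multi-line, so
+ # that each group can be extracted from setup.py in the appropriate way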
+ keyfunc = lambda k: (k in self.setuparg_list_fields, k in self.setuparg_multi_line_values)
+ grouped_keys = itertools.groupby(sorted(keys, key=keyfunc), keyfunc)
+ for index, keys in grouped_keys:
+ if index == (True, False):
+ # Splitlines output for each arg as a list value
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = [l.rstrip() for l in arg_info.splitlines()]
+ elif index == (False, True):
+ # Entire output for each arg
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = arg_info
+ else:
+ info.update(self.get_setup_byline(list(keys), setupscript))
+ return info
+
+ def get_setup_byline(self, fields, setupscript='./setup.py'):
+ info = {}
+
+ cmd = ['python', setupscript]
+ cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
+ try:
+ info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ if len(fields) != len(info_lines):
+ logger.error('Mismatch between setup.py output lines and number of fields')
+ sys.exit(1)
+
+ for lineno, line in enumerate(info_lines):
+ line = line.rstrip()
+ info[fields[lineno]] = line
+ return info
+
+ def apply_info_replacements(self, info):
+ for variable, search, replace in self.replacements:
+ if variable not in info:
+ continue
+
+ def replace_value(search, replace, value):
+ if replace is None:
+ if re.search(search, value):
+ return None
+ else:
+ new_value = re.sub(search, replace, value)
+ if value != new_value:
+ return new_value
+ return value
+
+ value = info[variable]
+ if isinstance(value, str):
+ new_value = replace_value(search, replace, value)
+ if new_value is None:
+ del info[variable]
+ elif new_value != value:
+ info[variable] = new_value
+ elif hasattr(value, 'items'):
+ for dkey, dvalue in list(value.items()):
+ new_list = []
+ for pos, a_value in enumerate(dvalue):
+ new_value = replace_value(search, replace, a_value)
+                        if new_value is not None:
+ new_list.append(new_value)
+
+ if value != new_list:
+ value[dkey] = new_list
+ else:
+ new_list = []
+ for pos, a_value in enumerate(value):
+ new_value = replace_value(search, replace, a_value)
+                    if new_value is not None:
+ new_list.append(new_value)
+
+ if value != new_list:
+ info[variable] = new_list
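+
+    # Illustration with a hypothetical replacement tuple (the real entries
+    # come from self.replacements): ('Summary', r'\.$', '') strips a trailing
+    # full stop from info['Summary'], while a replacement of None deletes the
+    # value outright whenever the pattern matches.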
+
+ def scan_setup_python_deps(self, srctree, setup_info, setup_non_literals):
+ if 'Package-dir' in setup_info:
+ package_dir = setup_info['Package-dir']
+ else:
+ package_dir = {}
+
+ class PackageDir(distutils.command.build_py.build_py):
+ def __init__(self, package_dir):
+ self.package_dir = package_dir
+
+ pd = PackageDir(package_dir)
+ to_scan = []
+ if not any(v in setup_non_literals for v in ['Py-modules', 'Scripts', 'Packages']):
+ if 'Py-modules' in setup_info:
+ for module in setup_info['Py-modules']:
+ try:
+ package, module = module.rsplit('.', 1)
+ except ValueError:
+ package, module = '.', module
+ module_path = os.path.join(pd.get_package_dir(package), module + '.py')
+ to_scan.append(module_path)
+
+ if 'Packages' in setup_info:
+ for package in setup_info['Packages']:
+ to_scan.append(pd.get_package_dir(package))
+
+ if 'Scripts' in setup_info:
+ to_scan.extend(setup_info['Scripts'])
+ else:
+ logger.info("Scanning the entire source tree, as one or more of the following setup keywords are non-literal: py_modules, scripts, packages.")
+
+ if not to_scan:
+ to_scan = ['.']
+
+ logger.info("Scanning paths for packages & dependencies: %s", ', '.join(to_scan))
+
+ provided_packages = self.parse_pkgdata_for_python_packages()
+ scanned_deps = self.scan_python_dependencies([os.path.join(srctree, p) for p in to_scan])
+ mapped_deps, unmapped_deps = set(self.base_pkgdeps), set()
+ for dep in scanned_deps:
+ mapped = provided_packages.get(dep)
+ if mapped:
+ logger.debug('Mapped %s to %s' % (dep, mapped))
+ mapped_deps.add(mapped)
+ else:
+ logger.debug('Could not map %s' % dep)
+ unmapped_deps.add(dep)
+ return mapped_deps, unmapped_deps
+
+ def scan_python_dependencies(self, paths):
+ deps = set()
+ try:
+ dep_output = self.run_command(['pythondeps', '-d'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ for line in dep_output.splitlines():
+ line = line.rstrip()
+ dep, filename = line.split('\t', 1)
+ if filename.endswith('/setup.py'):
+ continue
+ deps.add(dep)
+
+ try:
+ provides_output = self.run_command(['pythondeps', '-p'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ provides_lines = (l.rstrip() for l in provides_output.splitlines())
+ provides = set(l for l in provides_lines if l and l != 'setup')
+ deps -= provides
+
+ return deps
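+
+    # The parsing above assumes pythondeps prints one tab-separated
+    # "dependency<TAB>filename" pair per line for -d, and one provided module
+    # name per line for -p (a sketch of the expected output format).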
+
+ def parse_pkgdata_for_python_packages(self):
+ suffixes = [t[0] for t in imp.get_suffixes()]
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+
+ ldata = tinfoil.config_data.createCopy()
+ bb.parse.handle('classes/python-dir.bbclass', ldata, True)
+ python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
+
+ dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
+ python_dirs = [python_sitedir + os.sep,
+ os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
+ os.path.dirname(python_sitedir) + os.sep]
+ packages = {}
+ for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
+ files_info = None
+ with open(pkgdatafile, 'r') as f:
+ for line in f.readlines():
+ field, value = line.split(': ', 1)
+ if field == 'FILES_INFO':
+ files_info = ast.literal_eval(value)
+ break
+ else:
+ continue
+
+ for fn in files_info:
+ for suffix in suffixes:
+ if fn.endswith(suffix):
+ break
+ else:
+ continue
+
+ if fn.startswith(dynload_dir + os.sep):
+ if '/.debug/' in fn:
+ continue
+ base = os.path.basename(fn)
+ provided = base.split('.', 1)[0]
+ packages[provided] = os.path.basename(pkgdatafile)
+ continue
+
+ for python_dir in python_dirs:
+ if fn.startswith(python_dir):
+ relpath = fn[len(python_dir):]
+ relstart, _, relremaining = relpath.partition(os.sep)
+ if relstart.endswith('.egg'):
+ relpath = relremaining
+ base, _ = os.path.splitext(relpath)
+
+ if '/.debug/' in base:
+ continue
+ if os.path.basename(base) == '__init__':
+ base = os.path.dirname(base)
+ base = base.replace(os.sep + os.sep, os.sep)
+ provided = base.replace(os.sep, '.')
+ packages[provided] = os.path.basename(pkgdatafile)
+ return packages
+
+ @classmethod
+ def run_command(cls, cmd, **popenargs):
+ if 'stderr' not in popenargs:
+ popenargs['stderr'] = subprocess.STDOUT
+ try:
+ return subprocess.check_output(cmd, **popenargs).decode('utf-8')
+ except OSError as exc:
+            logger.error('Unable to run `%s`: %s', ' '.join(cmd), exc)
+ raise
+ except subprocess.CalledProcessError as exc:
+            logger.error('Unable to run `%s`: %s', ' '.join(cmd), exc.output)
+ raise
+
+
+def gather_setup_info(fileobj):
+ parsed = ast.parse(fileobj.read(), fileobj.name)
+ visitor = SetupScriptVisitor()
+ visitor.visit(parsed)
+
+ non_literals, extensions = {}, []
+ for key, value in list(visitor.keywords.items()):
+ if key == 'ext_modules':
+ if isinstance(value, list):
+ for ext in value:
+ if (isinstance(ext, ast.Call) and
+ isinstance(ext.func, ast.Name) and
+ ext.func.id == 'Extension' and
+ not has_non_literals(ext.args)):
+ extensions.append(ext.args[0])
+ elif has_non_literals(value):
+ non_literals[key] = value
+ del visitor.keywords[key]
+
+ return visitor.keywords, visitor.imported_modules, non_literals, extensions
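+
+# Illustrative behaviour for a hypothetical setup.py: given
+#   setup(name='foo', version='1.0', ext_modules=[Extension('foo._bar', ['bar.c'])])
+# the returned keywords include name and version, extensions lists 'foo._bar',
+# and any argument whose value cannot be evaluated as a literal ends up in
+# non_literals.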
+
+
+class SetupScriptVisitor(ast.NodeVisitor):
+ def __init__(self):
+ ast.NodeVisitor.__init__(self)
+ self.keywords = {}
+ self.non_literals = []
+ self.imported_modules = set()
+
+ def visit_Expr(self, node):
+ if isinstance(node.value, ast.Call) and \
+ isinstance(node.value.func, ast.Name) and \
+ node.value.func.id == 'setup':
+ self.visit_setup(node.value)
+
+ def visit_setup(self, node):
+ call = LiteralAstTransform().visit(node)
+ self.keywords = call.keywords
+ for k, v in self.keywords.items():
+ if has_non_literals(v):
+ self.non_literals.append(k)
+
+ def visit_Import(self, node):
+ for alias in node.names:
+ self.imported_modules.add(alias.name)
+
+ def visit_ImportFrom(self, node):
+ self.imported_modules.add(node.module)
+
+
+class LiteralAstTransform(ast.NodeTransformer):
+ """Simplify the ast through evaluation of literals."""
+ excluded_fields = ['ctx']
+
+ def visit(self, node):
+ if not isinstance(node, ast.AST):
+ return node
+ else:
+ return ast.NodeTransformer.visit(self, node)
+
+ def generic_visit(self, node):
+ try:
+ return ast.literal_eval(node)
+ except ValueError:
+ for field, value in ast.iter_fields(node):
+ if field in self.excluded_fields:
+ delattr(node, field)
+ if value is None:
+ continue
+
+ if isinstance(value, list):
+ if field in ('keywords', 'kwargs'):
+ new_value = dict((kw.arg, self.visit(kw.value)) for kw in value)
+ else:
+ new_value = [self.visit(i) for i in value]
+ else:
+ new_value = self.visit(value)
+ setattr(node, field, new_value)
+ return node
+
+ def visit_Name(self, node):
+        if hasattr(__builtins__, node.id):
+ return getattr(__builtins__, node.id)
+ else:
+ return self.generic_visit(node)
+
+ def visit_Tuple(self, node):
+ return tuple(self.visit(v) for v in node.elts)
+
+ def visit_List(self, node):
+ return [self.visit(v) for v in node.elts]
+
+ def visit_Set(self, node):
+ return set(self.visit(v) for v in node.elts)
+
+ def visit_Dict(self, node):
+ keys = (self.visit(k) for k in node.keys)
+ values = (self.visit(v) for v in node.values)
+ return dict(zip(keys, values))
+
+
+def has_non_literals(value):
+ if isinstance(value, ast.AST):
+ return True
+ elif isinstance(value, str):
+ return False
+ elif hasattr(value, 'values'):
+ return any(has_non_literals(v) for v in value.values())
+ elif hasattr(value, '__iter__'):
+ return any(has_non_literals(v) for v in value)
+
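+# e.g. has_non_literals('abc') is False, while a dict whose values include an
+# ast.Call node is True; ints and None fall through and return None, which
+# callers treat the same as False.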
+
+def register_recipe_handlers(handlers):
+ # We need to make sure this is ahead of the makefile fallback handler
+ handlers.append((PythonRecipeHandler(), 70))
diff --git a/poky/scripts/lib/recipetool/create_kernel.py b/poky/scripts/lib/recipetool/create_kernel.py
new file mode 100644
index 000000000..ca4996c7a
--- /dev/null
+++ b/poky/scripts/lib/recipetool/create_kernel.py
@@ -0,0 +1,99 @@
+# Recipe creation tool - kernel support plugin
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+import os
+import logging
+from recipetool.create import RecipeHandler, read_pkgconfig_provides, validate_pv
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class KernelRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ import bb.process
+ if 'buildsystem' in handled:
+ return False
+
+ for tell in ['arch', 'firmware', 'Kbuild', 'Kconfig']:
+ if not os.path.exists(os.path.join(srctree, tell)):
+ return False
+
+ handled.append('buildsystem')
+ del lines_after[:]
+ del classes[:]
+ template = os.path.join(tinfoil.config_data.getVar('COREBASE'), 'meta-skeleton', 'recipes-kernel', 'linux', 'linux-yocto-custom.bb')
+ def handle_var(varname, origvalue, op, newlines):
+ if varname in ['SRCREV', 'SRCREV_machine']:
+ while newlines[-1].startswith('#'):
+ del newlines[-1]
+ try:
+ stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree, shell=True)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip(), op, 0, True
+ elif varname == 'LINUX_VERSION':
+ makefile = os.path.join(srctree, 'Makefile')
+ if os.path.exists(makefile):
+ kversion = -1
+ kpatchlevel = -1
+ ksublevel = -1
+ kextraversion = ''
+ with open(makefile, 'r', errors='surrogateescape') as f:
+ for i, line in enumerate(f):
+ if i > 10:
+ break
+ if line.startswith('VERSION ='):
+ kversion = int(line.split('=')[1].strip())
+ elif line.startswith('PATCHLEVEL ='):
+ kpatchlevel = int(line.split('=')[1].strip())
+ elif line.startswith('SUBLEVEL ='):
+ ksublevel = int(line.split('=')[1].strip())
+ elif line.startswith('EXTRAVERSION ='):
+ kextraversion = line.split('=')[1].strip()
+ version = ''
+ if kversion > -1 and kpatchlevel > -1:
+ version = '%d.%d' % (kversion, kpatchlevel)
+ if ksublevel > -1:
+ version += '.%d' % ksublevel
+ version += kextraversion
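+                    # e.g. a Makefile carrying VERSION = 4, PATCHLEVEL = 14,
+                    # SUBLEVEL = 0 and EXTRAVERSION = -rc7 yields "4.14.0-rc7"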
+ if version:
+ return version, op, 0, True
+ elif varname == 'SRC_URI':
+ while newlines[-1].startswith('#'):
+ del newlines[-1]
+ elif varname == 'COMPATIBLE_MACHINE':
+ while newlines[-1].startswith('#'):
+ del newlines[-1]
+ machine = tinfoil.config_data.getVar('MACHINE')
+ return machine, op, 0, True
+ return origvalue, op, 0, True
+ with open(template, 'r') as f:
+ varlist = ['SRCREV', 'SRCREV_machine', 'SRC_URI', 'LINUX_VERSION', 'COMPATIBLE_MACHINE']
+ (_, newlines) = bb.utils.edit_metadata(f, varlist, handle_var)
+ lines_before[:] = [line.rstrip('\n') for line in newlines]
+
+ return True
+
+def register_recipe_handlers(handlers):
+ handlers.append((KernelRecipeHandler(), 100))
diff --git a/poky/scripts/lib/recipetool/create_kmod.py b/poky/scripts/lib/recipetool/create_kmod.py
new file mode 100644
index 000000000..4569b53c8
--- /dev/null
+++ b/poky/scripts/lib/recipetool/create_kmod.py
@@ -0,0 +1,152 @@
+# Recipe creation tool - kernel module support plugin
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import re
+import os
+import logging
+from recipetool.create import RecipeHandler, read_pkgconfig_provides, validate_pv
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class KernelModuleRecipeHandler(RecipeHandler):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ import bb.process
+ if 'buildsystem' in handled:
+ return False
+
+ module_inc_re = re.compile(r'^#include\s+<linux/module.h>$')
+ makefiles = []
+ is_module = False
+
+ files = RecipeHandler.checkfiles(srctree, ['*.c', '*.h'], recursive=True, excludedirs=['contrib', 'test', 'examples'])
+ if files:
+ for cfile in files:
+ # Look in same dir or parent for Makefile
+ for makefile in [os.path.join(os.path.dirname(cfile), 'Makefile'), os.path.join(os.path.dirname(os.path.dirname(cfile)), 'Makefile')]:
+ if makefile in makefiles:
+ break
+ else:
+ if os.path.exists(makefile):
+ makefiles.append(makefile)
+ break
+ else:
+ continue
+ with open(cfile, 'r', errors='surrogateescape') as f:
+ for line in f:
+ if module_inc_re.match(line.strip()):
+ is_module = True
+ break
+ if is_module:
+ break
+
+ if is_module:
+ classes.append('module')
+ handled.append('buildsystem')
+ # module.bbclass and the classes it inherits do most of the hard
+ # work, but we need to tweak it slightly depending on what the
+ # Makefile does (and there is a range of those)
+ # Check the makefile for the appropriate install target
+ install_lines = []
+ compile_lines = []
+ in_install = False
+ in_compile = False
+ install_target = None
+ with open(makefile, 'r', errors='surrogateescape') as f:
+ for line in f:
+ if line.startswith('install:'):
+ if not install_lines:
+ in_install = True
+ install_target = 'install'
+ elif line.startswith('modules_install:'):
+ install_lines = []
+ in_install = True
+ install_target = 'modules_install'
+ elif line.startswith('modules:'):
+ compile_lines = []
+ in_compile = True
+ elif line.startswith(('all:', 'default:')):
+ if not compile_lines:
+ in_compile = True
+ elif line:
+ if line[0] == '\t':
+ if in_install:
+ install_lines.append(line)
+ elif in_compile:
+ compile_lines.append(line)
+ elif ':' in line:
+ in_install = False
+ in_compile = False
+
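+            # A typical out-of-tree module Makefile handled here looks like
+            # (an assumed example, not taken from any specific module):
+            #   modules:
+            #           $(MAKE) -C $(KERNEL_SRC) M=$(PWD) modules
+            #   modules_install:
+            #           $(MAKE) -C $(KERNEL_SRC) M=$(PWD) modules_install
+            # check_target() below pulls KERNEL_SRC out of the '-C' argument.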
+ def check_target(lines, install):
+ kdirpath = ''
+ manual_install = False
+ for line in lines:
+ splitline = line.split()
+ if splitline[0] in ['make', 'gmake', '$(MAKE)']:
+ if '-C' in splitline:
+ idx = splitline.index('-C') + 1
+ if idx < len(splitline):
+ kdirpath = splitline[idx]
+ break
+ elif install and splitline[0] == 'install':
+ if '.ko' in line:
+ manual_install = True
+ return kdirpath, manual_install
+
+ kdirpath = None
+ manual_install = False
+ if install_lines:
+ kdirpath, manual_install = check_target(install_lines, install=True)
+ if compile_lines and not kdirpath:
+ kdirpath, _ = check_target(compile_lines, install=False)
+
+ if manual_install or not install_lines:
+ lines_after.append('EXTRA_OEMAKE_append_task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
+ elif install_target and install_target != 'modules_install':
+ lines_after.append('MODULES_INSTALL_TARGET = "install"')
+
+ warnmsg = None
+ kdirvar = None
+ if kdirpath:
+ res = re.match(r'\$\(([^$)]+)\)', kdirpath)
+ if res:
+ kdirvar = res.group(1)
+ if kdirvar != 'KERNEL_SRC':
+ lines_after.append('EXTRA_OEMAKE += "%s=${STAGING_KERNEL_DIR}"' % kdirvar)
+ elif kdirpath.startswith('/lib/'):
+ warnmsg = 'Kernel path in install makefile is hardcoded - you will need to patch the makefile'
+ if not kdirvar and not warnmsg:
+ warnmsg = 'Unable to find means of passing kernel path into install makefile - if kernel path is hardcoded you will need to patch the makefile'
+ if warnmsg:
+ warnmsg += '. Note that the variable KERNEL_SRC will be passed in as the kernel source path.'
+ logger.warn(warnmsg)
+ lines_after.append('# %s' % warnmsg)
+
+ return True
+
+ return False
+
+def register_recipe_handlers(handlers):
+ handlers.append((KernelModuleRecipeHandler(), 15))
diff --git a/poky/scripts/lib/recipetool/create_npm.py b/poky/scripts/lib/recipetool/create_npm.py
new file mode 100644
index 000000000..bb42a5ca5
--- /dev/null
+++ b/poky/scripts/lib/recipetool/create_npm.py
@@ -0,0 +1,330 @@
+# Recipe creation tool - node.js NPM module support plugin
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+import logging
+import subprocess
+import tempfile
+import shutil
+import json
+from recipetool.create import RecipeHandler, split_pkg_licenses, handle_license_vars
+
+logger = logging.getLogger('recipetool')
+
+
+tinfoil = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class NpmRecipeHandler(RecipeHandler):
+ lockdownpath = None
+
+ def _ensure_npm(self, fixed_setup=False):
+ if not tinfoil.recipes_parsed:
+ tinfoil.parse_recipes()
+ try:
+ rd = tinfoil.parse_recipe('nodejs-native')
+ except bb.providers.NoProvider:
+ if fixed_setup:
+ msg = 'nodejs-native is required for npm but is not available within this SDK'
+ else:
+ msg = 'nodejs-native is required for npm but is not available - you will likely need to add a layer that provides nodejs'
+ logger.error(msg)
+ return None
+ bindir = rd.getVar('STAGING_BINDIR_NATIVE')
+ npmpath = os.path.join(bindir, 'npm')
+ if not os.path.exists(npmpath):
+ tinfoil.build_targets('nodejs-native', 'addto_recipe_sysroot')
+ if not os.path.exists(npmpath):
+ logger.error('npm required to process specified source, but nodejs-native did not seem to populate it')
+ return None
+ return bindir
+
+ def _handle_license(self, data):
+ '''
+ Handle the license value from an npm package.json file
+ '''
+ license = None
+ if 'license' in data:
+ license = data['license']
+ if isinstance(license, dict):
+ license = license.get('type', None)
+ if license:
+ if 'OR' in license:
+ license = license.replace('OR', '|')
+ license = license.replace('AND', '&')
+ license = license.replace(' ', '_')
+ if not license[0] == '(':
+ license = '(' + license + ')'
+ else:
+ license = license.replace('AND', '&')
+ if license[0] == '(':
+ license = license[1:]
+ if license[-1] == ')':
+ license = license[:-1]
+ license = license.replace('MIT/X11', 'MIT')
+ license = license.replace('Public Domain', 'PD')
+ license = license.replace('SEE LICENSE IN EULA',
+ 'SEE-LICENSE-IN-EULA')
+ return license
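+
+    # Worked examples of the normalisation above (illustrative inputs):
+    #   'MIT/X11'         -> 'MIT'
+    #   '(ISC AND MIT)'   -> 'ISC & MIT'
+    #   'MIT OR GPL-2.0'  -> '(MIT_|_GPL-2.0)' (spaces become underscores)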
+
+ def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before, d):
+ try:
+ runenv = dict(os.environ, PATH=d.getVar('PATH'))
+ bb.process.run('npm shrinkwrap', cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ except bb.process.ExecutionError as e:
+ logger.warn('npm shrinkwrap failed:\n%s' % e.stdout)
+ return
+
+ tmpfile = os.path.join(localfilesdir, 'npm-shrinkwrap.json')
+ shutil.move(os.path.join(srctree, 'npm-shrinkwrap.json'), tmpfile)
+ extravalues.setdefault('extrafiles', {})
+ extravalues['extrafiles']['npm-shrinkwrap.json'] = tmpfile
+ lines_before.append('NPM_SHRINKWRAP := "${THISDIR}/${PN}/npm-shrinkwrap.json"')
+
+ def _lockdown(self, srctree, localfilesdir, extravalues, lines_before, d):
+ runenv = dict(os.environ, PATH=d.getVar('PATH'))
+ if not NpmRecipeHandler.lockdownpath:
+ NpmRecipeHandler.lockdownpath = tempfile.mkdtemp('recipetool-npm-lockdown')
+ bb.process.run('npm install lockdown --prefix %s' % NpmRecipeHandler.lockdownpath,
+ cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ relockbin = os.path.join(NpmRecipeHandler.lockdownpath, 'node_modules', 'lockdown', 'relock.js')
+ if not os.path.exists(relockbin):
+ logger.warn('Could not find relock.js within lockdown directory; skipping lockdown')
+ return
+ try:
+ bb.process.run('node %s' % relockbin, cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ except bb.process.ExecutionError as e:
+ logger.warn('lockdown-relock failed:\n%s' % e.stdout)
+ return
+
+ tmpfile = os.path.join(localfilesdir, 'lockdown.json')
+ shutil.move(os.path.join(srctree, 'lockdown.json'), tmpfile)
+ extravalues.setdefault('extrafiles', {})
+ extravalues['extrafiles']['lockdown.json'] = tmpfile
+ lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
+
+ def _handle_dependencies(self, d, deps, optdeps, devdeps, lines_before, srctree):
+ import scriptutils
+ # If this isn't a single module we need to get the dependencies
+ # and add them to SRC_URI
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'SRC_URI':
+ if not origvalue.startswith('npm://'):
+ src_uri = origvalue.split()
+ deplist = {}
+ for dep, depver in optdeps.items():
+ depdata = self.get_npm_data(dep, depver, d)
+ if self.check_npm_optional_dependency(depdata):
+ deplist[dep] = depdata
+ for dep, depver in devdeps.items():
+ depdata = self.get_npm_data(dep, depver, d)
+ if self.check_npm_optional_dependency(depdata):
+ deplist[dep] = depdata
+ for dep, depver in deps.items():
+ depdata = self.get_npm_data(dep, depver, d)
+ deplist[dep] = depdata
+
+ extra_urls = []
+ for dep, depdata in deplist.items():
+ version = depdata.get('version', None)
+ if version:
+ url = 'npm://registry.npmjs.org;name=%s;version=%s;subdir=node_modules/%s' % (dep, version, dep)
+ extra_urls.append(url)
+ if extra_urls:
+ scriptutils.fetch_url(tinfoil, ' '.join(extra_urls), None, srctree, logger)
+ src_uri.extend(extra_urls)
+ return src_uri, None, -1, True
+ return origvalue, None, 0, True
+ updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
+ if updated:
+ del lines_before[:]
+ for line in newlines:
+ # Hack to avoid newlines that edit_metadata inserts
+ if line.endswith('\n'):
+ line = line[:-1]
+ lines_before.append(line)
+ return updated
+
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ import bb.utils
+ import oe.package
+ from collections import OrderedDict
+
+ if 'buildsystem' in handled:
+ return False
+
+ def read_package_json(fn):
+ with open(fn, 'r', errors='surrogateescape') as f:
+ return json.loads(f.read())
+
+ files = RecipeHandler.checkfiles(srctree, ['package.json'])
+ if files:
+ d = bb.data.createCopy(tinfoil.config_data)
+ npm_bindir = self._ensure_npm()
+ if not npm_bindir:
+ sys.exit(14)
+ d.prependVar('PATH', '%s:' % npm_bindir)
+
+ data = read_package_json(files[0])
+ if 'name' in data and 'version' in data:
+ extravalues['PN'] = data['name']
+ extravalues['PV'] = data['version']
+ classes.append('npm')
+ handled.append('buildsystem')
+ if 'description' in data:
+ extravalues['SUMMARY'] = data['description']
+ if 'homepage' in data:
+ extravalues['HOMEPAGE'] = data['homepage']
+
+ fetchdev = extravalues['fetchdev'] or None
+ deps, optdeps, devdeps = self.get_npm_package_dependencies(data, fetchdev)
+ self._handle_dependencies(d, deps, optdeps, devdeps, lines_before, srctree)
+
+ # Shrinkwrap
+ localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
+ self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before, d)
+
+ # Lockdown
+ self._lockdown(srctree, localfilesdir, extravalues, lines_before, d)
+
+            # Split each npm module out to its own package
+ npmpackages = oe.package.npm_split_package_dirs(srctree)
+ licvalues = None
+ for item in handled:
+ if isinstance(item, tuple):
+ if item[0] == 'license':
+ licvalues = item[1]
+ break
+ if not licvalues:
+ licvalues = handle_license_vars(srctree, lines_before, handled, extravalues, d)
+ if licvalues:
+ # Augment the license list with information we have in the packages
+ licenses = {}
+ license = self._handle_license(data)
+ if license:
+ licenses['${PN}'] = license
+ for pkgname, pkgitem in npmpackages.items():
+ _, pdata = pkgitem
+ license = self._handle_license(pdata)
+ if license:
+ licenses[pkgname] = license
+ # Now write out the package-specific license values
+            # We need to strip out the json data dicts for this since
+            # split_pkg_licenses isn't expecting them
+ packages = OrderedDict((x,y[0]) for x,y in npmpackages.items())
+ packages['${PN}'] = ''
+ pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
+ all_licenses = list(set([item.replace('_', ' ') for pkglicense in pkglicenses.values() for item in pkglicense]))
+ if '&' in all_licenses:
+ all_licenses.remove('&')
+ extravalues['LICENSE'] = ' & '.join(all_licenses)
+
+ # Need to move S setting after inherit npm
+ for i, line in enumerate(lines_before):
+ if line.startswith('S ='):
+ lines_before.pop(i)
+ lines_after.insert(0, '# Must be set after inherit npm since that itself sets S')
+ lines_after.insert(1, line)
+ break
+
+ return True
+
+ return False
+
+ # FIXME this is duplicated from lib/bb/fetch2/npm.py
+ def _parse_view(self, output):
+ '''
+ Parse the output of npm view --json; the last JSON result
+ is assumed to be the one that we're interested in.
+ '''
+ pdata = None
+ outdeps = {}
+ datalines = []
+ bracelevel = 0
+ for line in output.splitlines():
+ if bracelevel:
+ datalines.append(line)
+ elif '{' in line:
+ datalines = []
+ datalines.append(line)
+ bracelevel = bracelevel + line.count('{') - line.count('}')
+ if datalines:
+ pdata = json.loads('\n'.join(datalines))
+ return pdata
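+
+    # For instance, output holding two single-line JSON objects:
+    #   {"name": "foo", "version": "1.0.0"}
+    #   {"name": "foo", "version": "1.0.1"}
+    # parses to the dict for 1.0.1 -- only the last object is kept (an
+    # illustrative sketch of typical 'npm view --json' output).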
+
+ # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+ # (split out from _getdependencies())
+ def get_npm_data(self, pkg, version, d):
+ import bb.fetch2
+ pkgfullname = pkg
+        if version != '*' and '/' not in version:
+            pkgfullname += "@'%s'" % version
+        logger.debug("Calling getdeps on %s", pkg)
+ runenv = dict(os.environ, PATH=d.getVar('PATH'))
+ fetchcmd = "npm view %s --json" % pkgfullname
+ output, _ = bb.process.run(fetchcmd, stderr=subprocess.STDOUT, env=runenv, shell=True)
+ data = self._parse_view(output)
+ return data
+
+ # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+ # (split out from _getdependencies())
+ def get_npm_package_dependencies(self, pdata, fetchdev):
+ dependencies = pdata.get('dependencies', {})
+ optionalDependencies = pdata.get('optionalDependencies', {})
+ dependencies.update(optionalDependencies)
+ if fetchdev:
+ devDependencies = pdata.get('devDependencies', {})
+ dependencies.update(devDependencies)
+ else:
+ devDependencies = {}
+ depsfound = {}
+ optdepsfound = {}
+ devdepsfound = {}
+ for dep in dependencies:
+ if dep in optionalDependencies:
+ optdepsfound[dep] = dependencies[dep]
+ elif dep in devDependencies:
+ devdepsfound[dep] = dependencies[dep]
+ else:
+ depsfound[dep] = dependencies[dep]
+ return depsfound, optdepsfound, devdepsfound
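+
+    # e.g. for hypothetical pdata with dependencies {'a': '^1.0'} and
+    # optionalDependencies {'b': '*'}, with fetchdev unset, this returns
+    # ({'a': '^1.0'}, {'b': '*'}, {}).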
+
+ # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+ # (split out from _getdependencies())
+ def check_npm_optional_dependency(self, pdata):
+ pkg_os = pdata.get('os', None)
+ if pkg_os:
+ if not isinstance(pkg_os, list):
+ pkg_os = [pkg_os]
+ blacklist = False
+ for item in pkg_os:
+ if item.startswith('!'):
+ blacklist = True
+ break
+ if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
+ pkg = pdata.get('name', 'Unnamed package')
+                logger.debug("Skipping %s since it's incompatible with Linux", pkg)
+ return False
+ return True
+
+
+def register_recipe_handlers(handlers):
+ handlers.append((NpmRecipeHandler(), 60))
diff --git a/poky/scripts/lib/recipetool/newappend.py b/poky/scripts/lib/recipetool/newappend.py
new file mode 100644
index 000000000..decce83fa
--- /dev/null
+++ b/poky/scripts/lib/recipetool/newappend.py
@@ -0,0 +1,89 @@
+# Recipe creation tool - newappend plugin
+#
+# This sub-command creates a bbappend for the specified target and prints the
+# path to the bbappend.
+#
+# Example: recipetool newappend meta-mylayer busybox
+#
+# Copyright (C) 2015 Christopher Larson <kergoth@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import argparse
+import errno
+import logging
+import os
+import re
+import subprocess
+import sys
+import bb.utils
+import scriptutils
+
+
+logger = logging.getLogger('recipetool')
+tinfoil = None
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+def layer(layerpath):
+ if not os.path.exists(os.path.join(layerpath, 'conf', 'layer.conf')):
+ raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
+ return layerpath
+
+
+def newappend(args):
+ import oe.recipeutils
+
+ recipe_path = tinfoil.get_recipe_file(args.target)
+
+ rd = tinfoil.config_data.createCopy()
+ rd.setVar('FILE', recipe_path)
+ append_path, path_ok = oe.recipeutils.get_bbappend_path(rd, args.destlayer, args.wildcard_version)
+ if not append_path:
+ logger.error('Unable to determine layer directory containing %s', recipe_path)
+ return 1
+
+ if not path_ok:
+ logger.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
+
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
+ if not os.path.abspath(args.destlayer) in layerdirs:
+ logger.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
+
+ if not os.path.exists(append_path):
+ bb.utils.mkdirhier(os.path.dirname(append_path))
+
+ try:
+ open(append_path, 'a').close()
+ except (OSError, IOError) as exc:
+ logger.critical(str(exc))
+ return 1
+
+ if args.edit:
+ return scriptutils.run_editor([append_path, recipe_path], logger)
+ else:
+ print(append_path)
+
+
+def register_commands(subparsers):
+ parser = subparsers.add_parser('newappend',
+ help='Create a bbappend for the specified target in the specified layer')
+ parser.add_argument('-e', '--edit', help='Edit the new append. This obeys $VISUAL if set, otherwise $EDITOR, otherwise vi.', action='store_true')
+ parser.add_argument('-w', '--wildcard-version', help='Use wildcard to make the bbappend apply to any recipe version', action='store_true')
+ parser.add_argument('destlayer', help='Base directory of the destination layer to write the bbappend to', type=layer)
+ parser.add_argument('target', help='Target recipe/provide to append')
+ parser.set_defaults(func=newappend, parserecipes=True)
diff --git a/poky/scripts/lib/recipetool/setvar.py b/poky/scripts/lib/recipetool/setvar.py
new file mode 100644
index 000000000..9de315a0e
--- /dev/null
+++ b/poky/scripts/lib/recipetool/setvar.py
@@ -0,0 +1,75 @@
+# Recipe creation tool - set variable plugin
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import fnmatch
+import re
+import logging
+import scriptutils
+
+logger = logging.getLogger('recipetool')
+
+tinfoil = None
+plugins = None
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+def setvar(args):
+ import oe.recipeutils
+
+ if args.delete:
+ if args.value:
+ logger.error('-D/--delete and specifying a value are mutually exclusive')
+ return 1
+ value = None
+ else:
+ if args.value is None:
+ logger.error('You must specify a value if not using -D/--delete')
+ return 1
+ value = args.value
+ varvalues = {args.varname: value}
+
+ if args.recipe_only:
+ patches = [oe.recipeutils.patch_recipe_file(args.recipefile, varvalues, patch=args.patch)]
+ else:
+ rd = tinfoil.parse_recipe_file(args.recipefile, False)
+ if not rd:
+ return 1
+ patches = oe.recipeutils.patch_recipe(rd, args.recipefile, varvalues, patch=args.patch)
+ if args.patch:
+ for patch in patches:
+ for line in patch:
+ sys.stdout.write(line)
+ return 0
+
+
+def register_commands(subparsers):
+ parser_setvar = subparsers.add_parser('setvar',
+ help='Set a variable within a recipe',
+ description='Adds/updates the value a variable is set to in a recipe')
+ parser_setvar.add_argument('recipefile', help='Recipe file to update')
+ parser_setvar.add_argument('varname', help='Variable name to set')
+ parser_setvar.add_argument('value', nargs='?', help='New value to set the variable to')
+ parser_setvar.add_argument('--recipe-only', '-r', help='Do not set variable in any include file if present', action='store_true')
+ parser_setvar.add_argument('--patch', '-p', help='Create a patch to make the change instead of modifying the recipe', action='store_true')
+ parser_setvar.add_argument('--delete', '-D', help='Delete the specified value instead of setting it', action='store_true')
+ parser_setvar.set_defaults(func=setvar)
diff --git a/poky/scripts/lib/scriptpath.py b/poky/scripts/lib/scriptpath.py
new file mode 100644
index 000000000..d00317e18
--- /dev/null
+++ b/poky/scripts/lib/scriptpath.py
@@ -0,0 +1,42 @@
+# Path utility functions for OE python scripts
+#
+# Copyright (C) 2012-2014 Intel Corporation
+# Copyright (C) 2011 Mentor Graphics Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import os.path
+
+def add_oe_lib_path():
+ basepath = os.path.abspath(os.path.dirname(__file__) + '/../..')
+ newpath = basepath + '/meta/lib'
+ sys.path.insert(0, newpath)
+
+def add_bitbake_lib_path():
+ basepath = os.path.abspath(os.path.dirname(__file__) + '/../..')
+ bitbakepath = None
+ if os.path.exists(basepath + '/bitbake/lib/bb'):
+ bitbakepath = basepath + '/bitbake'
+ else:
+ # look for bitbake/bin dir in PATH
+ for pth in os.environ['PATH'].split(':'):
+ if os.path.exists(os.path.join(pth, '../lib/bb')):
+ bitbakepath = os.path.abspath(os.path.join(pth, '..'))
+ break
+
+ if bitbakepath:
+ sys.path.insert(0, bitbakepath + '/lib')
+ return bitbakepath
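+
+# A typical caller (a sketch assuming the standard poky scripts/ layout):
+#   sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib'))
+#   import scriptpath
+#   scriptpath.add_oe_lib_path()
+#   bitbakepath = scriptpath.add_bitbake_lib_path()
+#   if not bitbakepath:
+#       sys.exit("Unable to find bitbake relative to this script or in PATH")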
diff --git a/poky/scripts/lib/scriptutils.py b/poky/scripts/lib/scriptutils.py
new file mode 100644
index 000000000..85b1c949b
--- /dev/null
+++ b/poky/scripts/lib/scriptutils.py
@@ -0,0 +1,241 @@
+# Script utility functions
+#
+# Copyright (C) 2014 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import logging
+import glob
+import argparse
+import subprocess
+import tempfile
+import shutil
+import random
+import string
+
+def logger_create(name, stream=None):
+ logger = logging.getLogger(name)
+ loggerhandler = logging.StreamHandler(stream=stream)
+ loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ logger.addHandler(loggerhandler)
+ logger.setLevel(logging.INFO)
+ return logger
+
+def logger_setup_color(logger, color='auto'):
+ from bb.msg import BBLogFormatter
+ console = logging.StreamHandler(sys.stdout)
+ formatter = BBLogFormatter("%(levelname)s: %(message)s")
+ console.setFormatter(formatter)
+ logger.handlers = [console]
+    if color == 'always' or (color == 'auto' and console.stream.isatty()):
+ formatter.enable_color()
+
+
+def load_plugins(logger, plugins, pluginpath):
+ import imp
+
+ def load_plugin(name):
+ logger.debug('Loading plugin %s' % name)
+ fp, pathname, description = imp.find_module(name, [pluginpath])
+ try:
+ return imp.load_module(name, fp, pathname, description)
+ finally:
+ if fp:
+ fp.close()
+
+ def plugin_name(filename):
+ return os.path.splitext(os.path.basename(filename))[0]
+
+ known_plugins = [plugin_name(p.__name__) for p in plugins]
+ logger.debug('Loading plugins from %s...' % pluginpath)
+ for fn in glob.glob(os.path.join(pluginpath, '*.py')):
+ name = plugin_name(fn)
+ if name != '__init__' and name not in known_plugins:
+ plugin = load_plugin(name)
+ if hasattr(plugin, 'plugin_init'):
+ plugin.plugin_init(plugins)
+ plugins.append(plugin)
+
+def git_convert_standalone_clone(repodir):
+ """If specified directory is a git repository, ensure it's a standalone clone"""
+ import bb.process
+ if os.path.exists(os.path.join(repodir, '.git')):
+ alternatesfile = os.path.join(repodir, '.git', 'objects', 'info', 'alternates')
+ if os.path.exists(alternatesfile):
+ # This will have been cloned with -s, so we need to convert it so none
+ # of the contents is shared
+ bb.process.run('git repack -a', cwd=repodir)
+ os.remove(alternatesfile)
+
+def _get_temp_recipe_dir(d):
+ # This is a little bit hacky but we need to find a place where we can put
+ # the recipe so that bitbake can find it. We're going to delete it at the
+ # end so it doesn't really matter where we put it.
+ bbfiles = d.getVar('BBFILES').split()
+ fetchrecipedir = None
+ for pth in bbfiles:
+ if pth.endswith('.bb'):
+ pthdir = os.path.dirname(pth)
+ if os.access(os.path.dirname(os.path.dirname(pthdir)), os.W_OK):
+ fetchrecipedir = pthdir.replace('*', 'recipetool')
+ if pthdir.endswith('workspace/recipes/*'):
+ # Prefer the workspace
+ break
+ return fetchrecipedir
+
+class FetchUrlFailure(Exception):
+ def __init__(self, url):
+ self.url = url
+ def __str__(self):
+ return "Failed to fetch URL %s" % self.url
+
+def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirrors=False):
+ """
+ Fetch the specified URL using normal do_fetch and do_unpack tasks, i.e.
+ any dependencies that need to be satisfied in order to support the fetch
+ operation will be taken care of
+ """
+
+ import bb
+
+ checksums = {}
+ fetchrecipepn = None
+
+ # We need to put our temp directory under ${BASE_WORKDIR} otherwise
+ # we may have problems with the recipe-specific sysroot population
+ tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
+ bb.utils.mkdirhier(tmpparent)
+ tmpdir = tempfile.mkdtemp(prefix='recipetool-', dir=tmpparent)
+ try:
+ tmpworkdir = os.path.join(tmpdir, 'work')
+ logger.debug('fetch_url: temp dir is %s' % tmpdir)
+
+ fetchrecipedir = _get_temp_recipe_dir(tinfoil.config_data)
+ if not fetchrecipedir:
+ logger.error('Searched BBFILES but unable to find a writeable place to put temporary recipe')
+ sys.exit(1)
+ fetchrecipe = None
+ bb.utils.mkdirhier(fetchrecipedir)
+ try:
+ # Generate a dummy recipe so we can follow more or less normal paths
+ # for do_fetch and do_unpack
+            # We would use the tempfile functions here, but they can produce
+            # underscores, which aren't allowed in recipe file names except to
+            # separate the version
+ rndstring = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
+ fetchrecipe = os.path.join(fetchrecipedir, 'tmp-recipetool-%s.bb' % rndstring)
+ fetchrecipepn = os.path.splitext(os.path.basename(fetchrecipe))[0]
+ logger.debug('Generating initial recipe %s for fetching' % fetchrecipe)
+ with open(fetchrecipe, 'w') as f:
+ # We don't want to have to specify LIC_FILES_CHKSUM
+ f.write('LICENSE = "CLOSED"\n')
+ # We don't need the cross-compiler
+ f.write('INHIBIT_DEFAULT_DEPS = "1"\n')
+ # We don't have the checksums yet so we can't require them
+ f.write('BB_STRICT_CHECKSUM = "ignore"\n')
+ f.write('SRC_URI = "%s"\n' % srcuri)
+ f.write('SRCREV = "%s"\n' % srcrev)
+ f.write('WORKDIR = "%s"\n' % tmpworkdir)
+ # Set S out of the way so it doesn't get created under the workdir
+ f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
+ if not mirrors:
+ # We do not need PREMIRRORS since we are almost certainly
+ # fetching new source rather than something that has already
+ # been fetched. Hence, we disable them by default.
+ # However, we provide an option for users to enable it.
+ f.write('PREMIRRORS = ""\n')
+ f.write('MIRRORS = ""\n')
+
+ logger.info('Fetching %s...' % srcuri)
+
+ # FIXME this is too noisy at the moment
+
+ # Parse recipes so our new recipe gets picked up
+ tinfoil.parse_recipes()
+
+ def eventhandler(event):
+ if isinstance(event, bb.fetch2.MissingChecksumEvent):
+ checksums.update(event.checksums)
+ return True
+ return False
+
+ # Run the fetch + unpack tasks
+ res = tinfoil.build_targets(fetchrecipepn,
+ 'do_unpack',
+ handle_events=True,
+ extra_events=['bb.fetch2.MissingChecksumEvent'],
+ event_callback=eventhandler)
+ if not res:
+ raise FetchUrlFailure(srcuri)
+
+ # Remove unneeded directories
+ rd = tinfoil.parse_recipe(fetchrecipepn)
+ if rd:
+ pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
+ for pathvar in pathvars:
+ path = rd.getVar(pathvar)
+ shutil.rmtree(path)
+ finally:
+ if fetchrecipe:
+ try:
+ os.remove(fetchrecipe)
+ except FileNotFoundError:
+ pass
+ try:
+ os.rmdir(fetchrecipedir)
+ except OSError as e:
+ import errno
+ if e.errno != errno.ENOTEMPTY:
+ raise
+
+ bb.utils.mkdirhier(destdir)
+ for fn in os.listdir(tmpworkdir):
+ shutil.move(os.path.join(tmpworkdir, fn), destdir)
+
+ finally:
+ if not preserve_tmp:
+ shutil.rmtree(tmpdir)
+ tmpdir = None
+
+ return checksums, tmpdir
+
+
+def run_editor(fn, logger=None):
+ if isinstance(fn, str):
+ params = '"%s"' % fn
+ else:
+ params = ''
+ for fnitem in fn:
+ params += ' "%s"' % fnitem
+
+ editor = os.getenv('VISUAL', os.getenv('EDITOR', 'vi'))
+ try:
+ return subprocess.check_call('%s %s' % (editor, params), shell=True)
+ except subprocess.CalledProcessError as exc:
+ logger.error("Execution of '%s' failed: %s" % (editor, exc))
+ return 1
+
+def is_src_url(param):
+ """
+ Check if a parameter is a URL and return True if so
+ NOTE: be careful about changing this as it will influence how devtool/recipetool command line handling works
+ """
+ if not param:
+ return False
+ elif '://' in param:
+ return True
+ elif param.startswith('git@') or ('@' in param and param.endswith('.git')):
+ return True
+ return False
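+
+# Illustrative results for the checks above (not exhaustive):
+#   is_src_url('https://example.com/foo-1.0.tar.gz')    -> True
+#   is_src_url('git@gitlab.example.com:group/repo.git') -> True
+#   is_src_url('libfoo')                                -> False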
diff --git a/poky/scripts/lib/wic/__init__.py b/poky/scripts/lib/wic/__init__.py
new file mode 100644
index 000000000..85876b138
--- /dev/null
+++ b/poky/scripts/lib/wic/__init__.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2007 Red Hat, Inc.
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+class WicError(Exception):
+ pass
diff --git a/poky/scripts/lib/wic/canned-wks/common.wks.inc b/poky/scripts/lib/wic/canned-wks/common.wks.inc
new file mode 100644
index 000000000..89880b417
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/common.wks.inc
@@ -0,0 +1,3 @@
+# This file is included into 3 canned wks files from this directory
+part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+part / --source rootfs --use-uuid --fstype=ext4 --label platform --align 1024
diff --git a/poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg b/poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
new file mode 100644
index 000000000..c58e74a85
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
@@ -0,0 +1,27 @@
+# This is an example configuration file for syslinux.
+TIMEOUT 50
+ALLOWOPTIONS 1
+SERIAL 0 115200
+PROMPT 0
+
+UI vesamenu.c32
+menu title Select boot options
+menu tabmsg Press [Tab] to edit, [Return] to select
+
+DEFAULT Graphics console boot
+
+LABEL Graphics console boot
+KERNEL /vmlinuz
+APPEND label=boot rootwait
+
+LABEL Serial console boot
+KERNEL /vmlinuz
+APPEND label=boot rootwait console=ttyS0,115200
+
+LABEL Graphics console install
+KERNEL /vmlinuz
+APPEND label=install rootwait
+
+LABEL Serial console install
+KERNEL /vmlinuz
+APPEND label=install rootwait console=ttyS0,115200
diff --git a/poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks b/poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks
new file mode 100644
index 000000000..3529e05c8
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.wks
@@ -0,0 +1,8 @@
+# short-description: Create a 'pcbios' direct disk image with custom bootloader config
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly dd to boot media. The bootloader configuration source is a user file.
+
+include common.wks.inc
+
+bootloader --configfile="directdisk-bootloader-config.cfg"
+
diff --git a/poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks b/poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks
new file mode 100644
index 000000000..8d7d8de6e
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks
@@ -0,0 +1,10 @@
+# short-description: Create a 'pcbios' direct disk image
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly dd to boot media.
+
+
+part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
+
+bootloader --ptable gpt --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"
+
diff --git a/poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks b/poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks
new file mode 100644
index 000000000..f61d941d6
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/directdisk-multi-rootfs.wks
@@ -0,0 +1,23 @@
+# short-description: Create multi rootfs image using rootfs plugin
+# long-description: Creates a partitioned disk image with two rootfs partitions
+# using rootfs plugin.
+#
+# Partitions can use either
+# - indirect rootfs references to image recipe(s):
+# wic create directdisk-multi-indirect-recipes -e core-image-minimal \
+# --rootfs-dir rootfs1=core-image-minimal
+# --rootfs-dir rootfs2=core-image-minimal-dev
+#
+# - or paths to rootfs directories:
+# wic create directdisk-multi-rootfs \
+# --rootfs-dir rootfs1=tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs/
+# --rootfs-dir rootfs2=tmp/work/qemux86_64-poky-linux/core-image-minimal-dev/1.0-r0/rootfs/
+#
+# - or any combinations of -r and --rootfs command line options
+
+part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+part / --source rootfs --rootfs-dir=rootfs1 --ondisk sda --fstype=ext4 --label platform --align 1024
+part /rescue --source rootfs --rootfs-dir=rootfs2 --ondisk sda --fstype=ext4 --label secondary --align 1024
+
+bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"
+
diff --git a/poky/scripts/lib/wic/canned-wks/directdisk.wks b/poky/scripts/lib/wic/canned-wks/directdisk.wks
new file mode 100644
index 000000000..8c8e06b02
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/directdisk.wks
@@ -0,0 +1,8 @@
+# short-description: Create a 'pcbios' direct disk image
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly dd to boot media.
+
+include common.wks.inc
+
+bootloader --timeout=0 --append="rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0 console=ttyS0,115200n8"
+
diff --git a/poky/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in b/poky/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
new file mode 100644
index 000000000..7300e65e3
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
@@ -0,0 +1,3 @@
+bootloader --ptable gpt
+part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.0
+part / --source rootfs --fstype=ext4 --label root --align 1024 --exclude-path boot/
diff --git a/poky/scripts/lib/wic/canned-wks/mkefidisk.wks b/poky/scripts/lib/wic/canned-wks/mkefidisk.wks
new file mode 100644
index 000000000..9f534fe18
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/mkefidisk.wks
@@ -0,0 +1,11 @@
+# short-description: Create an EFI disk image
+# long-description: Creates a partitioned EFI disk image that the user
+# can directly dd to boot media.
+
+part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
+
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
+
+part swap --ondisk sda --size 44 --label swap1 --fstype=swap
+
+bootloader --ptable gpt --timeout=5 --append="rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/poky/scripts/lib/wic/canned-wks/mkhybridiso.wks b/poky/scripts/lib/wic/canned-wks/mkhybridiso.wks
new file mode 100644
index 000000000..9d34e9b47
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/mkhybridiso.wks
@@ -0,0 +1,7 @@
+# short-description: Create a hybrid ISO image
+# long-description: Creates an EFI and legacy bootable hybrid ISO image
+# which can be used on optical media as well as USB media.
+
+part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi,image_name=HYBRID_ISO_IMG" --ondisk cd --label HYBRIDISO --fstype=ext4
+
+bootloader --timeout=15 --append=""
diff --git a/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
new file mode 100644
index 000000000..1f8466af2
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -0,0 +1,8 @@
+# short-description: Create a qemu machine 'pcbios' direct disk image
+# long-description: Creates a partitioned legacy BIOS disk image that the user
+# can directly use to boot a qemu machine.
+
+include common.wks.inc
+
+bootloader --timeout=0 --append="vga=0 uvesafb.mode_option=640x480-32 rw mem=256M ip=192.168.7.2::192.168.7.1:255.255.255.0 oprofile.timer=1 rootfstype=ext4 "
+
diff --git a/poky/scripts/lib/wic/canned-wks/sdimage-bootpart.wks b/poky/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
new file mode 100644
index 000000000..7ffd632f4
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
@@ -0,0 +1,6 @@
+# short-description: Create SD card image with a boot partition
+# long-description: Creates a partitioned SD card image. Boot files
+# are located in the first vfat partition.
+
+part /boot --source bootimg-partition --ondisk mmcblk --fstype=vfat --label boot --active --align 4 --size 16
+part / --source rootfs --ondisk mmcblk --fstype=ext4 --label root --align 4
diff --git a/poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks b/poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks
new file mode 100644
index 000000000..95d7b97a6
--- /dev/null
+++ b/poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks
@@ -0,0 +1,11 @@
+# short-description: Create an EFI disk image with systemd-boot
+# long-description: Creates a partitioned EFI disk image that the user
+# can directly dd to boot media. The selected bootloader is systemd-boot.
+
+part /boot --source bootimg-efi --sourceparams="loader=systemd-boot" --ondisk sda --label msdos --active --align 1024 --use-uuid
+
+part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid
+
+part swap --ondisk sda --size 44 --label swap1 --fstype=swap --use-uuid
+
+bootloader --ptable gpt --timeout=5 --append="rootwait rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/poky/scripts/lib/wic/engine.py b/poky/scripts/lib/wic/engine.py
new file mode 100644
index 000000000..edcfab39e
--- /dev/null
+++ b/poky/scripts/lib/wic/engine.py
@@ -0,0 +1,565 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+#
+# This module implements the image creation engine used by 'wic' to
+# create images. The engine parses through the OpenEmbedded kickstart
+# (wks) file specified and generates images that can then be directly
+# written onto media.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import logging
+import os
+import tempfile
+import json
+import subprocess
+
+from collections import namedtuple, OrderedDict
+from distutils.spawn import find_executable
+
+from wic import WicError
+from wic.filemap import sparse_copy
+from wic.pluginbase import PluginMgr
+from wic.misc import get_bitbake_var, exec_cmd
+
+logger = logging.getLogger('wic')
+
+def verify_build_env():
+ """
+ Verify that the build environment is sane.
+
+    Returns True if it is; raises WicError otherwise.
+ """
+ if not os.environ.get("BUILDDIR"):
+ raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
+
+ return True
+
+
+CANNED_IMAGE_DIR = "lib/wic/canned-wks" # relative to scripts
+SCRIPTS_CANNED_IMAGE_DIR = "scripts/" + CANNED_IMAGE_DIR
+WIC_DIR = "wic"
+
+def build_canned_image_list(path):
+ layers_path = get_bitbake_var("BBLAYERS")
+ canned_wks_layer_dirs = []
+
+ if layers_path is not None:
+ for layer_path in layers_path.split():
+ for wks_path in (WIC_DIR, SCRIPTS_CANNED_IMAGE_DIR):
+ cpath = os.path.join(layer_path, wks_path)
+ if os.path.isdir(cpath):
+ canned_wks_layer_dirs.append(cpath)
+
+ cpath = os.path.join(path, CANNED_IMAGE_DIR)
+ canned_wks_layer_dirs.append(cpath)
+
+ return canned_wks_layer_dirs
+
+def find_canned_image(scripts_path, wks_file):
+ """
+ Find a .wks file with the given name in the canned files dir.
+
+    Return None if not found
+ """
+ layers_canned_wks_dir = build_canned_image_list(scripts_path)
+
+ for canned_wks_dir in layers_canned_wks_dir:
+ for root, dirs, files in os.walk(canned_wks_dir):
+ for fname in files:
+ if fname.endswith("~") or fname.endswith("#"):
+ continue
+ if fname.endswith(".wks") and wks_file + ".wks" == fname:
+ fullpath = os.path.join(canned_wks_dir, fname)
+ return fullpath
+ return None
+
+
+def list_canned_images(scripts_path):
+ """
+ List the .wks files in the canned image dir, minus the extension.
+ """
+ layers_canned_wks_dir = build_canned_image_list(scripts_path)
+
+ for canned_wks_dir in layers_canned_wks_dir:
+ for root, dirs, files in os.walk(canned_wks_dir):
+ for fname in files:
+ if fname.endswith("~") or fname.endswith("#"):
+ continue
+ if fname.endswith(".wks"):
+ fullpath = os.path.join(canned_wks_dir, fname)
+                    desc = ""
+                    with open(fullpath) as wks:
+                        for line in wks:
+                            idx = line.find("short-description:")
+                            if idx != -1:
+                                desc = line[idx + len("short-description:"):].strip()
+                                break
+ basename = os.path.splitext(fname)[0]
+ print(" %s\t\t%s" % (basename.ljust(30), desc))
+
+
+def list_canned_image_help(scripts_path, fullpath):
+ """
+ List the help and params in the specified canned image.
+ """
+ found = False
+ with open(fullpath) as wks:
+ for line in wks:
+ if not found:
+ idx = line.find("long-description:")
+ if idx != -1:
+ print()
+ print(line[idx + len("long-description:"):].strip())
+ found = True
+ continue
+ if not line.strip():
+ break
+ idx = line.find("#")
+ if idx != -1:
+ print(line[idx + len("#:"):].rstrip())
+ else:
+ break
+
+
+def list_source_plugins():
+ """
+ List the available source plugins i.e. plugins available for --source.
+ """
+ plugins = PluginMgr.get_plugins('source')
+
+ for plugin in plugins:
+ print(" %s" % plugin)
+
+
+def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+ native_sysroot, options):
+ """
+ Create image
+
+ wks_file - user-defined OE kickstart file
+ rootfs_dir - absolute path to the build's /rootfs dir
+ bootimg_dir - absolute path to the build's boot artifacts directory
+ kernel_dir - absolute path to the build's kernel directory
+ native_sysroot - absolute path to the build's native sysroots dir
+ options - wic command line options (debug, bmap, etc)
+
+    Normally, the values for the build artifacts are determined
+ by 'wic -e' from the output of the 'bitbake -e' command given an
+ image name e.g. 'core-image-minimal' and a given machine set in
+ local.conf. If that's the case, the variables get the following
+ values from the output of 'bitbake -e':
+
+ rootfs_dir: IMAGE_ROOTFS
+ kernel_dir: DEPLOY_DIR_IMAGE
+ native_sysroot: STAGING_DIR_NATIVE
+
+ In the above case, bootimg_dir remains unset and the
+ plugin-specific image creation code is responsible for finding the
+ bootimg artifacts.
+
+    Alternatively, if 'wic -e' is not used, the individual 'wic'
+    options must be used to explicitly specify these values.
+ """
+ try:
+ oe_builddir = os.environ["BUILDDIR"]
+ except KeyError:
+ raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
+
+ if not os.path.exists(options.outdir):
+ os.makedirs(options.outdir)
+
+ pname = 'direct'
+ plugin_class = PluginMgr.get_plugins('imager').get(pname)
+ if not plugin_class:
+ raise WicError('Unknown plugin: %s' % pname)
+
+ plugin = plugin_class(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+ native_sysroot, oe_builddir, options)
+
+ plugin.do_create()
+
+ logger.info("The image(s) were created using OE kickstart file:\n %s", wks_file)
+
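+# Illustrative sketch, not part of the original module: the wic_create()
+# docstring above maps build artifact directories to bitbake variables.
+# Assuming those standard variable names, a 'wic -e'-style lookup could be
+# expressed with the get_bitbake_var() helper imported above, roughly:
+#
+#     rootfs_dir = get_bitbake_var("IMAGE_ROOTFS")
+#     kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+#     native_sysroot = get_bitbake_var("STAGING_DIR_NATIVE")
+#     bootimg_dir = None  # left to plugin-specific code to determine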
+
+def wic_list(args, scripts_path):
+ """
+ Print the list of images or source plugins.
+ """
+ if args.list_type is None:
+ return False
+
+ if args.list_type == "images":
+
+ list_canned_images(scripts_path)
+ return True
+ elif args.list_type == "source-plugins":
+ list_source_plugins()
+ return True
+ elif len(args.help_for) == 1 and args.help_for[0] == 'help':
+ wks_file = args.list_type
+ fullpath = find_canned_image(scripts_path, wks_file)
+ if not fullpath:
+ raise WicError("No image named %s found, exiting. "
+ "(Use 'wic list images' to list available images, "
+ "or specify a fully-qualified OE kickstart (.wks) "
+ "filename)" % wks_file)
+
+ list_canned_image_help(scripts_path, fullpath)
+ return True
+
+ return False
+
+
+class Disk:
+ def __init__(self, imagepath, native_sysroot, fstypes=('fat', 'ext')):
+ self.imagepath = imagepath
+ self.native_sysroot = native_sysroot
+ self.fstypes = fstypes
+ self._partitions = None
+ self._partimages = {}
+ self._lsector_size = None
+ self._psector_size = None
+ self._ptable_format = None
+
+ # find parted
+ self.paths = "/bin:/usr/bin:/usr/sbin:/sbin/"
+ if native_sysroot:
+ for path in self.paths.split(':'):
+ self.paths = "%s%s:%s" % (native_sysroot, path, self.paths)
+
+ self.parted = find_executable("parted", self.paths)
+ if not self.parted:
+ raise WicError("Can't find executable parted")
+
+ self.partitions = self.get_partitions()
+
+ def __del__(self):
+ for path in self._partimages.values():
+ os.unlink(path)
+
+ def get_partitions(self):
+ if self._partitions is None:
+ self._partitions = OrderedDict()
+ out = exec_cmd("%s -sm %s unit B print" % (self.parted, self.imagepath))
+ parttype = namedtuple("Part", "pnum start end size fstype")
+ splitted = out.splitlines()
+ lsector_size, psector_size, self._ptable_format = splitted[1].split(":")[3:6]
+ self._lsector_size = int(lsector_size)
+ self._psector_size = int(psector_size)
+ for line in splitted[2:]:
+ pnum, start, end, size, fstype = line.split(':')[:5]
+ partition = parttype(int(pnum), int(start[:-1]), int(end[:-1]),
+ int(size[:-1]), fstype)
+ self._partitions[pnum] = partition
+
+ return self._partitions
+
+ def __getattr__(self, name):
+ """Get path to the executable in a lazy way."""
+ if name in ("mdir", "mcopy", "mdel", "mdeltree", "sfdisk", "e2fsck",
+ "resize2fs", "mkswap", "mkdosfs", "debugfs"):
+ aname = "_%s" % name
+ if aname not in self.__dict__:
+ setattr(self, aname, find_executable(name, self.paths))
+ if aname not in self.__dict__:
+ raise WicError("Can't find executable {}".format(name))
+ return self.__dict__[aname]
+ return self.__dict__[name]
+
+ def _get_part_image(self, pnum):
+ if pnum not in self.partitions:
+ raise WicError("Partition %s is not in the image")
+ part = self.partitions[pnum]
+ # check if fstype is supported
+ for fstype in self.fstypes:
+ if part.fstype.startswith(fstype):
+ break
+ else:
+ raise WicError("Not supported fstype: {}".format(part.fstype))
+ if pnum not in self._partimages:
+ tmpf = tempfile.NamedTemporaryFile(prefix="wic-part")
+ dst_fname = tmpf.name
+ tmpf.close()
+ sparse_copy(self.imagepath, dst_fname, skip=part.start, length=part.size)
+ self._partimages[pnum] = dst_fname
+
+ return self._partimages[pnum]
+
+ def _put_part_image(self, pnum):
+ """Put partition image into partitioned image."""
+ sparse_copy(self._partimages[pnum], self.imagepath,
+ seek=self.partitions[pnum].start)
+
+ def dir(self, pnum, path):
+ if self.partitions[pnum].fstype.startswith('ext'):
+ return exec_cmd("{} {} -R 'ls -l {}'".format(self.debugfs,
+ self._get_part_image(pnum),
+ path), as_shell=True)
+ else: # fat
+ return exec_cmd("{} -i {} ::{}".format(self.mdir,
+ self._get_part_image(pnum),
+ path))
+
+ def copy(self, src, pnum, path):
+ """Copy partition image into wic image."""
+ if self.partitions[pnum].fstype.startswith('ext'):
+ cmd = "echo -e 'cd {}\nwrite {} {}' | {} -w {}".\
+ format(path, src, os.path.basename(src),
+ self.debugfs, self._get_part_image(pnum))
+ else: # fat
+ cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
+ self._get_part_image(pnum),
+ src, path)
+ exec_cmd(cmd, as_shell=True)
+ self._put_part_image(pnum)
+
+ def remove(self, pnum, path):
+ """Remove files/dirs from the partition."""
+ partimg = self._get_part_image(pnum)
+ if self.partitions[pnum].fstype.startswith('ext'):
+ exec_cmd("{} {} -wR 'rm {}'".format(self.debugfs,
+ self._get_part_image(pnum),
+ path), as_shell=True)
+ else: # fat
+ cmd = "{} -i {} ::{}".format(self.mdel, partimg, path)
+ try:
+ exec_cmd(cmd)
+ except WicError as err:
+ if "not found" in str(err) or "non empty" in str(err):
+                    # mdel outputs 'File ... not found' or 'directory ... non empty'
+ # try to use mdeltree as path could be a directory
+ cmd = "{} -i {} ::{}".format(self.mdeltree,
+ partimg, path)
+ exec_cmd(cmd)
+ else:
+ raise err
+ self._put_part_image(pnum)
+
+ def write(self, target, expand):
+ """Write disk image to the media or file."""
+ def write_sfdisk_script(outf, parts):
+ for key, val in parts['partitiontable'].items():
+ if key in ("partitions", "device", "firstlba", "lastlba"):
+ continue
+ if key == "id":
+ key = "label-id"
+ outf.write("{}: {}\n".format(key, val))
+ outf.write("\n")
+ for part in parts['partitiontable']['partitions']:
+ line = ''
+ for name in ('attrs', 'name', 'size', 'type', 'uuid'):
+ if name == 'size' and part['type'] == 'f':
+ # don't write size for extended partition
+ continue
+ val = part.get(name)
+ if val:
+ line += '{}={}, '.format(name, val)
+ if line:
+ line = line[:-2] # strip ', '
+ if part.get('bootable'):
+ line += ' ,bootable'
+ outf.write("{}\n".format(line))
+ outf.flush()
+
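+        # Illustrative note, not original code: based on sfdisk's dump
+        # format, the script generated above looks roughly like
+        #
+        #     label: dos
+        #     label-id: 0x4f1cc652
+        #     unit: sectors
+        #
+        #     size=204800, type=c ,bootable
+        #     size=819200, type=83
+        #
+        # (the label-id value here is a made-up example); write_ptable()
+        # below feeds it to 'sfdisk <target>' on stdin.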
+ def read_ptable(path):
+ out = exec_cmd("{} -dJ {}".format(self.sfdisk, path))
+ return json.loads(out)
+
+ def write_ptable(parts, target):
+ with tempfile.NamedTemporaryFile(prefix="wic-sfdisk-", mode='w') as outf:
+ write_sfdisk_script(outf, parts)
+ cmd = "{} --no-reread {} < {} 2>/dev/null".format(self.sfdisk, target, outf.name)
+ try:
+ subprocess.check_output(cmd, shell=True)
+ except subprocess.CalledProcessError as err:
+ raise WicError("Can't run '{}' command: {}".format(cmd, err))
+
+ if expand is None:
+ sparse_copy(self.imagepath, target)
+ else:
+ # copy first sectors that may contain bootloader
+ sparse_copy(self.imagepath, target, length=2048 * self._lsector_size)
+
+ # copy source partition table to the target
+ parts = read_ptable(self.imagepath)
+ write_ptable(parts, target)
+
+ # get size of unpartitioned space
+ free = None
+ for line in exec_cmd("{} -F {}".format(self.sfdisk, target)).splitlines():
+ if line.startswith("Unpartitioned space ") and line.endswith("sectors"):
+ free = int(line.split()[-2])
+ if free is None:
+ raise WicError("Can't get size of unpartitioned space")
+
+ # calculate expanded partitions sizes
+ sizes = {}
+ for num, part in enumerate(parts['partitiontable']['partitions'], 1):
+ if num in expand:
+ if expand[num] != 0: # don't resize partition if size is set to 0
+ sectors = expand[num] // self._lsector_size
+ free -= sectors - part['size']
+ part['size'] = sectors
+ sizes[num] = sectors
+ elif part['type'] != 'f':
+ sizes[num] = -1
+
+ for num, part in enumerate(parts['partitiontable']['partitions'], 1):
+ if sizes.get(num) == -1:
+ part['size'] += free // len(sizes)
+
+ # write resized partition table to the target
+ write_ptable(parts, target)
+
+ # read resized partition table
+ parts = read_ptable(target)
+
+ # copy partitions content
+ for num, part in enumerate(parts['partitiontable']['partitions'], 1):
+ pnum = str(num)
+ fstype = self.partitions[pnum].fstype
+
+ # copy unchanged partition
+ if part['size'] == self.partitions[pnum].size // self._lsector_size:
+ logger.info("copying unchanged partition {}".format(pnum))
+ sparse_copy(self._get_part_image(pnum), target, seek=part['start'] * self._lsector_size)
+ continue
+
+ # resize or re-create partitions
+ if fstype.startswith('ext') or fstype.startswith('fat') or \
+ fstype.startswith('linux-swap'):
+
+ partfname = None
+ with tempfile.NamedTemporaryFile(prefix="wic-part{}-".format(pnum)) as partf:
+ partfname = partf.name
+
+ if fstype.startswith('ext'):
+ logger.info("resizing ext partition {}".format(pnum))
+ partimg = self._get_part_image(pnum)
+ sparse_copy(partimg, partfname)
+ exec_cmd("{} -pf {}".format(self.e2fsck, partfname))
+ exec_cmd("{} {} {}s".format(\
+ self.resize2fs, partfname, part['size']))
+ elif fstype.startswith('fat'):
+ logger.info("copying content of the fat partition {}".format(pnum))
+ with tempfile.TemporaryDirectory(prefix='wic-fatdir-') as tmpdir:
+ # copy content to the temporary directory
+ cmd = "{} -snompi {} :: {}".format(self.mcopy,
+ self._get_part_image(pnum),
+ tmpdir)
+ exec_cmd(cmd)
+ # create new msdos partition
+ label = part.get("name")
+ label_str = "-n {}".format(label) if label else ''
+
+ cmd = "{} {} -C {} {}".format(self.mkdosfs, label_str, partfname,
+ part['size'])
+ exec_cmd(cmd)
+ # copy content from the temporary directory to the new partition
+ cmd = "{} -snompi {} {}/* ::".format(self.mcopy, partfname, tmpdir)
+ exec_cmd(cmd, as_shell=True)
+ elif fstype.startswith('linux-swap'):
+ logger.info("creating swap partition {}".format(pnum))
+ label = part.get("name")
+ label_str = "-L {}".format(label) if label else ''
+ uuid = part.get("uuid")
+ uuid_str = "-U {}".format(uuid) if uuid else ''
+ with open(partfname, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), part['size'] * self._lsector_size)
+ exec_cmd("{} {} {} {}".format(self.mkswap, label_str, uuid_str, partfname))
+ sparse_copy(partfname, target, seek=part['start'] * self._lsector_size)
+ os.unlink(partfname)
+ elif part['type'] != 'f':
+ logger.warn("skipping partition {}: unsupported fstype {}".format(pnum, fstype))
+
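+# Illustrative note, an assumption about the caller rather than original code:
+# the 'expand' argument of Disk.write() is either None (copy the image
+# unchanged) or a dict mapping partition number to its new size in bytes,
+# where 0 means "keep this partition's size" and partitions left out of the
+# dict share the remaining free space, e.g.:
+#
+#     disk = Disk("image.wic", native_sysroot, ('fat', 'ext', 'swap'))
+#     disk.write("/dev/sdb", {1: 0, 2: 300 * 1024 * 1024})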
+def wic_ls(args, native_sysroot):
+ """List contents of partitioned image or vfat partition."""
+ disk = Disk(args.path.image, native_sysroot)
+ if not args.path.part:
+ if disk.partitions:
+ print('Num Start End Size Fstype')
+ for part in disk.partitions.values():
+ print("{:2d} {:12d} {:12d} {:12d} {}".format(\
+ part.pnum, part.start, part.end,
+ part.size, part.fstype))
+ else:
+ path = args.path.path or '/'
+ print(disk.dir(args.path.part, path))
+
+def wic_cp(args, native_sysroot):
+ """
+ Copy local file or directory to the vfat partition of
+ partitioned image.
+ """
+ disk = Disk(args.dest.image, native_sysroot)
+ disk.copy(args.src, args.dest.part, args.dest.path)
+
+def wic_rm(args, native_sysroot):
+ """
+ Remove files or directories from the vfat partition of
+ partitioned image.
+ """
+ disk = Disk(args.path.image, native_sysroot)
+ disk.remove(args.path.part, args.path.path)
+
+def wic_write(args, native_sysroot):
+ """
+ Write image to a target device.
+ """
+ disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'swap'))
+ disk.write(args.target, args.expand)
+
+def find_canned(scripts_path, file_name):
+ """
+ Find a file either by its path or by name in the canned files dir.
+
+ Return None if not found
+ """
+ if os.path.exists(file_name):
+ return file_name
+
+ layers_canned_wks_dir = build_canned_image_list(scripts_path)
+ for canned_wks_dir in layers_canned_wks_dir:
+ for root, dirs, files in os.walk(canned_wks_dir):
+ for fname in files:
+ if fname == file_name:
+ fullpath = os.path.join(canned_wks_dir, fname)
+ return fullpath
+
+def get_custom_config(boot_file):
+ """
+ Get the custom configuration to be used for the bootloader.
+
+ Return None if the file can't be found.
+ """
+ # Get the scripts path of poky
+ scripts_path = os.path.abspath("%s/../.." % os.path.dirname(__file__))
+
+ cfg_file = find_canned(scripts_path, boot_file)
+ if cfg_file:
+ with open(cfg_file, "r") as f:
+ config = f.read()
+ return config
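+
+# Illustrative usage, not part of the original module (the file name below is
+# a made-up example): a bootloader plugin could fetch a user-provided config
+# fragment with
+#
+#     custom_cfg = get_custom_config("grub-custom.cfg")
+#     if custom_cfg:
+#         pass  # write custom_cfg into the generated bootloader config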
diff --git a/poky/scripts/lib/wic/filemap.py b/poky/scripts/lib/wic/filemap.py
new file mode 100644
index 000000000..a72fa09ef
--- /dev/null
+++ b/poky/scripts/lib/wic/filemap.py
@@ -0,0 +1,600 @@
+# Copyright (c) 2012 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License, version 2,
+# as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+"""
+This module implements a way to get file block mapping. Two methods
+are supported - the FIEMAP ioctl and the 'SEEK_HOLE / SEEK_DATA' features of
+the file seek syscall. The former is implemented by the 'FilemapFiemap' class,
+the latter is implemented by the 'FilemapSeek' class. Both classes provide the
+same API. The 'filemap' function automatically selects which class can be used
+and returns an instance of the class.
+"""
+
+# Disable the following pylint recommendations:
+# * Too many instance attributes (R0902)
+# pylint: disable=R0902
+
+import os
+import errno
+import struct
+import array
+import fcntl
+import tempfile
+import logging
+
+def get_block_size(file_obj):
+ """
+ Returns block size for file object 'file_obj'. Errors are indicated by the
+ 'IOError' exception.
+ """
+ # Get the block size of the host file-system for the image file by calling
+ # the FIGETBSZ ioctl (number 2).
+ binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ bsize = struct.unpack('I', binary_data)[0]
+    if not bsize:
+        stat = os.fstat(file_obj.fileno())
+ if hasattr(stat, 'st_blksize'):
+ bsize = stat.st_blksize
+ else:
+ raise IOError("Unable to determine block size")
+ return bsize
+
+class ErrorNotSupp(Exception):
+ """
+ An exception of this type is raised when the 'FIEMAP' or 'SEEK_HOLE' feature
+ is not supported either by the kernel or the file-system.
+ """
+ pass
+
+class Error(Exception):
+ """A class for all the other exceptions raised by this module."""
+ pass
+
+
+class _FilemapBase(object):
+ """
+ This is a base class for a couple of other classes in this module. This
+ class simply performs the common parts of the initialization process: opens
+ the image file, gets its size, etc. The 'log' parameter is the logger object
+ to use for printing messages.
+ """
+
+ def __init__(self, image, log=None):
+ """
+ Initialize a class instance. The 'image' argument is full path to the
+ file or file object to operate on.
+ """
+
+ self._log = log
+ if self._log is None:
+ self._log = logging.getLogger(__name__)
+
+ self._f_image_needs_close = False
+
+ if hasattr(image, "fileno"):
+ self._f_image = image
+ self._image_path = image.name
+ else:
+ self._image_path = image
+ self._open_image_file()
+
+ try:
+ self.image_size = os.fstat(self._f_image.fileno()).st_size
+ except IOError as err:
+ raise Error("cannot get information about file '%s': %s"
+ % (self._f_image.name, err))
+
+ try:
+ self.block_size = get_block_size(self._f_image)
+ except IOError as err:
+ raise Error("cannot get block size for '%s': %s"
+ % (self._image_path, err))
+
+ self.blocks_cnt = self.image_size + self.block_size - 1
+ self.blocks_cnt //= self.block_size
+
+ try:
+ self._f_image.flush()
+ except IOError as err:
+ raise Error("cannot flush image file '%s': %s"
+ % (self._image_path, err))
+
+ try:
+            os.fsync(self._f_image.fileno())
+ except OSError as err:
+ raise Error("cannot synchronize image file '%s': %s "
+ % (self._image_path, err.strerror))
+
+ self._log.debug("opened image \"%s\"" % self._image_path)
+ self._log.debug("block size %d, blocks count %d, image size %d"
+ % (self.block_size, self.blocks_cnt, self.image_size))
+
+ def __del__(self):
+ """The class destructor which just closes the image file."""
+ if self._f_image_needs_close:
+ self._f_image.close()
+
+ def _open_image_file(self):
+ """Open the image file."""
+ try:
+ self._f_image = open(self._image_path, 'rb')
+ except IOError as err:
+ raise Error("cannot open image file '%s': %s"
+ % (self._image_path, err))
+
+ self._f_image_needs_close = True
+
+ def block_is_mapped(self, block): # pylint: disable=W0613,R0201
+ """
+        This method has to be implemented by child classes. It returns
+ 'True' if block number 'block' of the image file is mapped and 'False'
+ otherwise.
+ """
+
+ raise Error("the method is not implemented")
+
+ def block_is_unmapped(self, block): # pylint: disable=W0613,R0201
+ """
+        This method has to be implemented by child classes. It returns
+ 'True' if block number 'block' of the image file is not mapped (hole)
+ and 'False' otherwise.
+ """
+
+ raise Error("the method is not implemented")
+
+ def get_mapped_ranges(self, start, count): # pylint: disable=W0613,R0201
+ """
+        This method has to be implemented by child classes. This is a
+ generator which yields ranges of mapped blocks in the file. The ranges
+ are tuples of 2 elements: [first, last], where 'first' is the first
+ mapped block and 'last' is the last mapped block.
+
+ The ranges are yielded for the area of the file of size 'count' blocks,
+ starting from block 'start'.
+ """
+
+ raise Error("the method is not implemented")
+
+ def get_unmapped_ranges(self, start, count): # pylint: disable=W0613,R0201
+ """
+        This method has to be implemented by child classes. Just like
+ 'get_mapped_ranges()', but yields unmapped block ranges instead
+ (holes).
+ """
+
+ raise Error("the method is not implemented")
+
+
+# The 'SEEK_HOLE' and 'SEEK_DATA' options of the file seek system call
+_SEEK_DATA = 3
+_SEEK_HOLE = 4
+
+def _lseek(file_obj, offset, whence):
+ """This is a helper function which invokes 'os.lseek' for file object
+ 'file_obj' and with specified 'offset' and 'whence'. The 'whence'
+ argument is supposed to be either '_SEEK_DATA' or '_SEEK_HOLE'. When
+ there is no more data or hole starting from 'offset', this function
+ returns '-1'. Otherwise the data or hole position is returned."""
+
+ try:
+ return os.lseek(file_obj.fileno(), offset, whence)
+ except OSError as err:
+        # The 'lseek' system call returns ENXIO if there is no data or
+        # hole starting from the specified offset.
+        if err.errno == errno.ENXIO:
+ return -1
+        elif err.errno == errno.EINVAL:
+ raise ErrorNotSupp("the kernel or file-system does not support "
+ "\"SEEK_HOLE\" and \"SEEK_DATA\"")
+ else:
+ raise
+
+class FilemapSeek(_FilemapBase):
+ """
+    This class uses the 'SEEK_HOLE' and 'SEEK_DATA' seek flags to find file block mapping.
+ Unfortunately, the current implementation requires the caller to have write
+ access to the image file.
+ """
+
+ def __init__(self, image, log=None):
+ """Refer the '_FilemapBase' class for the documentation."""
+
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapSeek: initializing")
+
+ self._probe_seek_hole()
+
+ def _probe_seek_hole(self):
+ """
+ Check whether the system implements 'SEEK_HOLE' and 'SEEK_DATA'.
+ Unfortunately, there seems to be no clean way for detecting this,
+ because often the system just fakes them by just assuming that all
+ files are fully mapped, so 'SEEK_HOLE' always returns EOF and
+ 'SEEK_DATA' always returns the requested offset.
+
+ I could not invent a better way of detecting the fake 'SEEK_HOLE'
+ implementation than just to create a temporary file in the same
+ directory where the image file resides. It would be nice to change this
+ to something better.
+ """
+
+ directory = os.path.dirname(self._image_path)
+
+ try:
+ tmp_obj = tempfile.TemporaryFile("w+", dir=directory)
+ except IOError as err:
+ raise ErrorNotSupp("cannot create a temporary in \"%s\": %s" \
+ % (directory, err))
+
+ try:
+ os.ftruncate(tmp_obj.fileno(), self.block_size)
+ except OSError as err:
+ raise ErrorNotSupp("cannot truncate temporary file in \"%s\": %s"
+ % (directory, err))
+
+ offs = _lseek(tmp_obj, 0, _SEEK_HOLE)
+ if offs != 0:
+ # We are dealing with the stub 'SEEK_HOLE' implementation which
+ # always returns EOF.
+ self._log.debug("lseek(0, SEEK_HOLE) returned %d" % offs)
+ raise ErrorNotSupp("the file-system does not support "
+ "\"SEEK_HOLE\" and \"SEEK_DATA\" but only "
+ "provides a stub implementation")
+
+ tmp_obj.close()
+
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ offs = _lseek(self._f_image, block * self.block_size, _SEEK_DATA)
+ if offs == -1:
+ result = False
+ else:
+ result = (offs // self.block_size == block)
+
+ self._log.debug("FilemapSeek: block_is_mapped(%d) returns %s"
+ % (block, result))
+ return result
+
+ def block_is_unmapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return not self.block_is_mapped(block)
+
+ def _get_ranges(self, start, count, whence1, whence2):
+ """
+ This function implements 'get_mapped_ranges()' and
+ 'get_unmapped_ranges()' depending on what is passed in the 'whence1'
+ and 'whence2' arguments.
+ """
+
+ assert whence1 != whence2
+ end = start * self.block_size
+ limit = end + count * self.block_size
+
+ while True:
+ start = _lseek(self._f_image, end, whence1)
+ if start == -1 or start >= limit or start == self.image_size:
+ break
+
+ end = _lseek(self._f_image, start, whence2)
+ if end == -1 or end == self.image_size:
+ end = self.blocks_cnt * self.block_size
+ if end > limit:
+ end = limit
+
+ start_blk = start // self.block_size
+ end_blk = end // self.block_size - 1
+ self._log.debug("FilemapSeek: yielding range (%d, %d)"
+ % (start_blk, end_blk))
+ yield (start_blk, end_blk)
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapSeek: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ return self._get_ranges(start, count, _SEEK_DATA, _SEEK_HOLE)
+
+ def get_unmapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapSeek: get_unmapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ return self._get_ranges(start, count, _SEEK_HOLE, _SEEK_DATA)
+
+
+# Below goes the FIEMAP ioctl implementation, which is not very readable
+# because it deals with the rather complex FIEMAP ioctl. To understand the
+# code, you need to know the FIEMAP interface, which is documented in the
+# "Documentation/filesystems/fiemap.txt" file in the Linux kernel sources.
+
+# Format string for 'struct fiemap'
+_FIEMAP_FORMAT = "=QQLLLL"
+# sizeof(struct fiemap)
+_FIEMAP_SIZE = struct.calcsize(_FIEMAP_FORMAT)
+# Format string for 'struct fiemap_extent'
+_FIEMAP_EXTENT_FORMAT = "=QQQQQLLLL"
+# sizeof(struct fiemap_extent)
+_FIEMAP_EXTENT_SIZE = struct.calcsize(_FIEMAP_EXTENT_FORMAT)
+# The FIEMAP ioctl number
+_FIEMAP_IOCTL = 0xC020660B
+# This FIEMAP ioctl flag which instructs the kernel to sync the file before
+# reading the block map
+_FIEMAP_FLAG_SYNC = 0x00000001
+# Size of the buffer for 'struct fiemap_extent' elements which will be used
+# when invoking the FIEMAP ioctl. The larger is the buffer, the less times the
+# FIEMAP ioctl will be invoked.
+_FIEMAP_BUFFER_SIZE = 256 * 1024
+
+class FilemapFiemap(_FilemapBase):
+ """
+ This class provides API to the FIEMAP ioctl. Namely, it allows to iterate
+ over all mapped blocks and over all holes.
+
+ This class synchronizes the image file every time it invokes the FIEMAP
+ ioctl in order to work-around early FIEMAP implementation kernel bugs.
+ """
+
+ def __init__(self, image, log=None):
+ """
+        Initialize a class instance. The 'image' argument is the full path
+        to the file or a file object to operate on.
+ """
+
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapFiemap: initializing")
+
+ self._buf_size = _FIEMAP_BUFFER_SIZE
+
+ # Calculate how many 'struct fiemap_extent' elements fit the buffer
+ self._buf_size -= _FIEMAP_SIZE
+ self._fiemap_extent_cnt = self._buf_size // _FIEMAP_EXTENT_SIZE
+ assert self._fiemap_extent_cnt > 0
+ self._buf_size = self._fiemap_extent_cnt * _FIEMAP_EXTENT_SIZE
+ self._buf_size += _FIEMAP_SIZE
+
+ # Allocate a mutable buffer for the FIEMAP ioctl
+ self._buf = array.array('B', [0] * self._buf_size)
+
+ # Check if the FIEMAP ioctl is supported
+ self.block_is_mapped(0)
+
+ def _invoke_fiemap(self, block, count):
+ """
+ Invoke the FIEMAP ioctl for 'count' blocks of the file starting from
+ block number 'block'.
+
+ The full result of the operation is stored in 'self._buf' on exit.
+ Returns the unpacked 'struct fiemap' data structure in form of a python
+        list (just like 'struct.unpack()').
+ """
+
+ if self.blocks_cnt != 0 and (block < 0 or block >= self.blocks_cnt):
+ raise Error("bad block number %d, should be within [0, %d]"
+ % (block, self.blocks_cnt))
+
+ # Initialize the 'struct fiemap' part of the buffer. We use the
+ # '_FIEMAP_FLAG_SYNC' flag in order to make sure the file is
+ # synchronized. The reason for this is that early FIEMAP
+ # implementations had many bugs related to cached dirty data, and
+ # synchronizing the file is a necessary work-around.
+ struct.pack_into(_FIEMAP_FORMAT, self._buf, 0, block * self.block_size,
+ count * self.block_size, _FIEMAP_FLAG_SYNC, 0,
+ self._fiemap_extent_cnt, 0)
+
+ try:
+ fcntl.ioctl(self._f_image, _FIEMAP_IOCTL, self._buf, 1)
+ except IOError as err:
+ # Note, the FIEMAP ioctl is supported by the Linux kernel starting
+ # from version 2.6.28 (year 2008).
+            if err.errno == errno.EOPNOTSUPP:
+ errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
+ "by the file-system"
+ self._log.debug(errstr)
+ raise ErrorNotSupp(errstr)
+            if err.errno == errno.ENOTTY:
+ errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
+ "by the kernel"
+ self._log.debug(errstr)
+ raise ErrorNotSupp(errstr)
+ raise Error("the FIEMAP ioctl failed for '%s': %s"
+ % (self._image_path, err))
+
+ return struct.unpack(_FIEMAP_FORMAT, self._buf[:_FIEMAP_SIZE])
+
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ struct_fiemap = self._invoke_fiemap(block, 1)
+
+ # The 3rd element of 'struct_fiemap' is the 'fm_mapped_extents' field.
+ # If it contains zero, the block is not mapped, otherwise it is
+ # mapped.
+ result = bool(struct_fiemap[3])
+ self._log.debug("FilemapFiemap: block_is_mapped(%d) returns %s"
+ % (block, result))
+ return result
+
+ def block_is_unmapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return not self.block_is_mapped(block)
+
+ def _unpack_fiemap_extent(self, index):
+ """
+ Unpack a 'struct fiemap_extent' structure object number 'index' from
+ the internal 'self._buf' buffer.
+ """
+
+ offset = _FIEMAP_SIZE + _FIEMAP_EXTENT_SIZE * index
+ return struct.unpack(_FIEMAP_EXTENT_FORMAT,
+ self._buf[offset : offset + _FIEMAP_EXTENT_SIZE])
+
+ def _do_get_mapped_ranges(self, start, count):
+ """
+        Implements most of the functionality for the 'get_mapped_ranges()'
+        generator: invokes the FIEMAP ioctl, walks through the mapped extents
+        and yields mapped block ranges. However, the ranges may be consecutive
+        (e.g., (1, 100), (101, 200)) and 'get_mapped_ranges()' simply merges
+        them.
+ """
+
+ block = start
+ while block < start + count:
+ struct_fiemap = self._invoke_fiemap(block, count)
+
+ mapped_extents = struct_fiemap[3]
+ if mapped_extents == 0:
+ # No more mapped blocks
+ return
+
+ extent = 0
+ while extent < mapped_extents:
+ fiemap_extent = self._unpack_fiemap_extent(extent)
+
+ # Start of the extent
+ extent_start = fiemap_extent[0]
+ # Starting block number of the extent
+ extent_block = extent_start // self.block_size
+ # Length of the extent
+ extent_len = fiemap_extent[2]
+ # Count of blocks in the extent
+ extent_count = extent_len // self.block_size
+
+ # Extent length and offset have to be block-aligned
+ assert extent_start % self.block_size == 0
+ assert extent_len % self.block_size == 0
+
+ if extent_block > start + count - 1:
+ return
+
+ first = max(extent_block, block)
+ last = min(extent_block + extent_count, start + count) - 1
+ yield (first, last)
+
+ extent += 1
+
+ block = extent_block + extent_count
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapFiemap: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ iterator = self._do_get_mapped_ranges(start, count)
+ first_prev, last_prev = next(iterator)
+
+ for first, last in iterator:
+ if last_prev == first - 1:
+ last_prev = last
+ else:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (first_prev, last_prev))
+ yield (first_prev, last_prev)
+ first_prev, last_prev = first, last
+
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (first_prev, last_prev))
+ yield (first_prev, last_prev)
+
+ def get_unmapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapFiemap: get_unmapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ hole_first = start
+ for first, last in self._do_get_mapped_ranges(start, count):
+ if first > hole_first:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (hole_first, first - 1))
+ yield (hole_first, first - 1)
+
+ hole_first = last + 1
+
+ if hole_first < start + count:
+ self._log.debug("FilemapFiemap: yielding range (%d, %d)"
+ % (hole_first, start + count - 1))
+ yield (hole_first, start + count - 1)
+
+def filemap(image, log=None):
+ """
+ Create and return an instance of a Filemap class - 'FilemapFiemap' or
+ 'FilemapSeek', depending on what the system we run on supports. If the
+ FIEMAP ioctl is supported, an instance of the 'FilemapFiemap' class is
+ returned. Otherwise, if 'SEEK_HOLE' is supported an instance of the
+ 'FilemapSeek' class is returned. If none of these are supported, the
+ function generates an 'Error' type exception.
+ """
+
+ try:
+ return FilemapFiemap(image, log)
+ except ErrorNotSupp:
+ return FilemapSeek(image, log)
+
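+# Illustrative usage, not part of the original module (the path is a made-up
+# example): pick whichever implementation the running kernel and file-system
+# support and walk the mapped blocks of a sparse file:
+#
+#     fmap = filemap("/tmp/example.img")
+#     for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
+#         print("mapped blocks %d..%d" % (first, last))
+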
+def sparse_copy(src_fname, dst_fname, skip=0, seek=0,
+ length=0, api=None):
+ """
+ Efficiently copy sparse file to or into another file.
+
+ src_fname: path to source file
+ dst_fname: path to destination file
+    skip: skip N bytes at the start of src
+ seek: seek N bytes from the start of dst
+ length: read N bytes from src and write them to dst
+ api: FilemapFiemap or FilemapSeek object
+ """
+ if not api:
+ api = filemap
+ fmap = api(src_fname)
+ try:
+ dst_file = open(dst_fname, 'r+b')
+ except IOError:
+ dst_file = open(dst_fname, 'wb')
+ if length:
+ dst_size = length + seek
+ else:
+ dst_size = os.path.getsize(src_fname) + seek - skip
+ dst_file.truncate(dst_size)
+
+ written = 0
+ for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
+ start = first * fmap.block_size
+ end = (last + 1) * fmap.block_size
+
+ if skip >= end:
+ continue
+
+ if start < skip < end:
+ start = skip
+
+ fmap._f_image.seek(start, os.SEEK_SET)
+
+ written += start - skip - written
+ if length and written >= length:
+ dst_file.seek(seek + length, os.SEEK_SET)
+ dst_file.close()
+ return
+
+ dst_file.seek(seek + start - skip, os.SEEK_SET)
+
+ chunk_size = 1024 * 1024
+ to_read = end - start
+ read = 0
+
+ while read < to_read:
+ if read + chunk_size > to_read:
+ chunk_size = to_read - read
+ size = chunk_size
+ if length and written + size > length:
+ size = length - written
+ chunk = fmap._f_image.read(size)
+ dst_file.write(chunk)
+ read += size
+ written += size
+ if written == length:
+ dst_file.close()
+ return
+ dst_file.close()
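+
+# Illustrative usage, not part of the original module (file names are made-up
+# examples): copy a partition image into a disk image at a 1 MiB offset,
+# preserving holes:
+#
+#     sparse_copy("part1.img", "disk.img", seek=1024 * 1024)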
diff --git a/poky/scripts/lib/wic/help.py b/poky/scripts/lib/wic/help.py
new file mode 100644
index 000000000..842b868a5
--- /dev/null
+++ b/poky/scripts/lib/wic/help.py
@@ -0,0 +1,1055 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module implements some basic help invocation functions along
+# with the bulk of the help topic text for the OE Core Image Tools.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import subprocess
+import logging
+
+from wic.pluginbase import PluginMgr, PLUGIN_TYPES
+
+logger = logging.getLogger('wic')
+
+def subcommand_error(args):
+ logger.info("invalid subcommand %s", args[0])
+
+
+def display_help(subcommand, subcommands):
+ """
+ Display help for subcommand.
+ """
+ if subcommand not in subcommands:
+ return False
+
+ hlp = subcommands.get(subcommand, subcommand_error)[2]
+ if callable(hlp):
+ hlp = hlp()
+ pager = subprocess.Popen('less', stdin=subprocess.PIPE)
+ pager.communicate(hlp.encode('utf-8'))
+
+ return True
+
+
+def wic_help(args, usage_str, subcommands):
+ """
+ Subcommand help dispatcher.
+ """
+    if args.help_topic is None or not display_help(args.help_topic, subcommands):
+ print(usage_str)
+
+
+def get_wic_plugins_help():
+ """
+ Combine wic_plugins_help with the help for every known
+ source plugin.
+ """
+ result = wic_plugins_help
+ for plugin_type in PLUGIN_TYPES:
+ result += '\n\n%s PLUGINS\n\n' % plugin_type.upper()
+ for name, plugin in PluginMgr.get_plugins(plugin_type).items():
+ result += "\n %s plugin:\n" % name
+ if plugin.__doc__:
+ result += plugin.__doc__
+ else:
+ result += "\n %s is missing docstring\n" % plugin
+ return result
+
+
+def invoke_subcommand(args, parser, main_command_usage, subcommands):
+ """
+ Dispatch to subcommand handler borrowed from combo-layer.
+ Should use argparse, but has to work in 2.6.
+ """
+ if not args.command:
+ logger.error("No subcommand specified, exiting")
+ parser.print_help()
+ return 1
+ elif args.command == "help":
+ wic_help(args, main_command_usage, subcommands)
+ elif args.command not in subcommands:
+ logger.error("Unsupported subcommand %s, exiting\n", args.command)
+ parser.print_help()
+ return 1
+ else:
+ subcmd = subcommands.get(args.command, subcommand_error)
+ usage = subcmd[1]
+ subcmd[0](args, usage)
+
+
+##
+# wic help and usage strings
+##
+
+wic_usage = """
+
+ Create a customized OpenEmbedded image
+
+ usage: wic [--version] | [--help] | [COMMAND [ARGS]]
+
+ Current 'wic' commands are:
+ help Show help for command or one of the topics (see below)
+ create Create a new OpenEmbedded image
+ list List available canned images and source plugins
+
+ Help topics:
+ overview wic overview - General overview of wic
+ plugins wic plugins - Overview and API
+ kickstart wic kickstart - wic kickstart reference
+"""
+
+wic_help_usage = """
+
+ usage: wic help <subcommand>
+
+ This command displays detailed help for the specified subcommand.
+"""
+
+wic_create_usage = """
+
+ Create a new OpenEmbedded image
+
+ usage: wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-e | --image-name] [-s, --skip-build-check] [-D, --debug]
+ [-r, --rootfs-dir] [-b, --bootimg-dir]
+ [-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
+ [-c, --compress-with] [-m, --bmap]
+
+ This command creates an OpenEmbedded image based on the 'OE kickstart
+ commands' found in the <wks file>.
+
+ The -o option can be used to place the image in a directory with a
+ different name and location.
+
+ See 'wic help create' for more detailed instructions.
+"""
+
+wic_create_help = """
+
+NAME
+ wic create - Create a new OpenEmbedded image
+
+SYNOPSIS
+ wic create <wks file or image name> [-o <DIRNAME> | --outdir <DIRNAME>]
+ [-e | --image-name] [-s, --skip-build-check] [-D, --debug]
+ [-r, --rootfs-dir] [-b, --bootimg-dir]
+ [-k, --kernel-dir] [-n, --native-sysroot] [-f, --build-rootfs]
+ [-c, --compress-with] [-m, --bmap] [--no-fstab-update]
+
+DESCRIPTION
+ This command creates an OpenEmbedded image based on the 'OE
+ kickstart commands' found in the <wks file>.
+
+ In order to do this, wic needs to know the locations of the
+ various build artifacts required to build the image.
+
+ Users can explicitly specify the build artifact locations using
+ the -r, -b, -k, and -n options. See below for details on where
+ the corresponding artifacts are typically found in a normal
+ OpenEmbedded build.
+
+ Alternatively, users can use the -e option to have 'wic' determine
+ those locations for a given image. If the -e option is used, the
+ user needs to have set the appropriate MACHINE variable in
+ local.conf, and have sourced the build environment.
+
+ The -e option is used to specify the name of the image to use the
+ artifacts from e.g. core-image-sato.
+
+ The -r option is used to specify the path to the /rootfs dir to
+ use as the .wks rootfs source.
+
+ The -b option is used to specify the path to the dir containing
+ the boot artifacts (e.g. /EFI or /syslinux dirs) to use as the
+ .wks bootimg source.
+
+ The -k option is used to specify the path to the dir containing
+ the kernel to use in the .wks bootimg.
+
+ The -n option is used to specify the path to the native sysroot
+ containing the tools to use to build the image.
+
+ The -f option is used to build rootfs by running "bitbake <image>"
+
+ The -s option is used to skip the build check. The build check is
+ a simple sanity check used to determine whether the user has
+ sourced the build environment so that the -e option can operate
+ correctly. If the user has specified the build artifact locations
+ explicitly, 'wic' assumes the user knows what he or she is doing
+ and skips the build check.
+
+ The -D option is used to display debug information detailing
+ exactly what happens behind the scenes when a create request is
+ fulfilled (or not, as the case may be). It enumerates and
+ displays the command sequence used, and should be included in any
+ bug report describing unexpected results.
+
+ When 'wic -e' is used, the locations for the build artifacts
+ values are determined by 'wic -e' from the output of the 'bitbake
+ -e' command given an image name e.g. 'core-image-minimal' and a
+ given machine set in local.conf. In that case, the image is
+ created as if the following 'bitbake -e' variables were used:
+
+ -r: IMAGE_ROOTFS
+ -k: STAGING_KERNEL_DIR
+ -n: STAGING_DIR_NATIVE
+ -b: empty (plugin-specific handlers must determine this)
+
+ If 'wic -e' is not used, the user needs to select the appropriate
+ value for -b (as well as -r, -k, and -n).
+
+ The -o option can be used to place the image in a directory with a
+ different name and location.
+
+ The -c option is used to specify compressor utility to compress
+ an image. gzip, bzip2 and xz compressors are supported.
+
+ The -m option is used to produce .bmap file for the image. This file
+ can be used to flash image using bmaptool utility.
+
+    The --no-fstab-update option is used to leave the fstab file
+    unchanged. With this option the final fstab file will be the same
+    as the one in the rootfs; wic will not update it, e.g. by adding a
+    new mount point. Users can control the fstab file content through
+    the base-files recipe.
+"""
+
+wic_list_usage = """
+
+ List available OpenEmbedded images and source plugins
+
+ usage: wic list images
+ wic list <image> help
+ wic list source-plugins
+
+ This command enumerates the set of available canned images as well as
+    help for those images. It also can be used to list available source
+    plugins.
+
+ The first form enumerates all the available 'canned' images.
+
+ The second form lists the detailed help information for a specific
+ 'canned' image.
+
+ The third form enumerates all the available --sources (source
+ plugins).
+
+ See 'wic help list' for more details.
+"""
+
+wic_list_help = """
+
+NAME
+ wic list - List available OpenEmbedded images and source plugins
+
+SYNOPSIS
+ wic list images
+ wic list <image> help
+ wic list source-plugins
+
+DESCRIPTION
+ This command enumerates the set of available canned images as well
+ as help for those images. It also can be used to list available
+ source plugins.
+
+ The first form enumerates all the available 'canned' images.
+ These are actually just the set of .wks files that have been moved
+    into the /scripts/lib/wic/canned-wks directory.
+
+ The second form lists the detailed help information for a specific
+ 'canned' image.
+
+ The third form enumerates all the available --sources (source
+ plugins). The contents of a given partition are driven by code
+ defined in 'source plugins'. Users specify a specific plugin via
+ the --source parameter of the partition .wks command. Normally
+ this is the 'rootfs' plugin but can be any of the more specialized
+ sources listed by the 'list source-plugins' command. Users can
+ also add their own source plugins - see 'wic help plugins' for
+ details.
+"""
+
+wic_ls_usage = """
+
+ List content of a partitioned image
+
+ usage: wic ls <image>[:<partition>[<path>]] [--native-sysroot <path>]
+
+    This command outputs either a list of image partitions or the directory
+    contents of vfat and ext* partitions.
+
+ See 'wic help ls' for more detailed instructions.
+
+"""
+
+wic_ls_help = """
+
+NAME
+ wic ls - List contents of partitioned image or partition
+
+SYNOPSIS
+ wic ls <image>
+ wic ls <image>:<vfat or ext* partition>
+ wic ls <image>:<vfat or ext* partition><path>
+ wic ls <image>:<vfat or ext* partition><path> --native-sysroot <path>
+
+DESCRIPTION
+ This command lists either partitions of the image or directory contents
+ of vfat or ext* partitions.
+
+    In the first form, it lists partitions of the image.
+ For example:
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic
+ Num Start End Size Fstype
+ 1 1048576 24438783 23390208 fat16
+ 2 25165824 50315263 25149440 ext4
+
+    The second and third forms list directory contents of the partition:
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is 2DF2-5F02
+ Directory for ::/
+
+ efi <DIR> 2017-05-11 10:54
+ startup nsh 26 2017-05-11 10:54
+ vmlinuz 6922288 2017-05-11 10:54
+ 3 files 6 922 314 bytes
+ 15 818 752 bytes free
+
+
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/EFI/boot/
+ Volume in drive : is boot
+ Volume Serial Number is 2DF2-5F02
+ Directory for ::/EFI/boot
+
+ . <DIR> 2017-05-11 10:54
+ .. <DIR> 2017-05-11 10:54
+ grub cfg 679 2017-05-11 10:54
+ bootx64 efi 571392 2017-05-11 10:54
+ 4 files 572 071 bytes
+ 15 818 752 bytes free
+
+ The -n option is used to specify the path to the native sysroot
+    containing the tools (parted and mtools) to use.
+
+"""
+
+wic_cp_usage = """
+
+ Copy files and directories to the vfat or ext* partition
+
+ usage: wic cp <src> <image>:<partition>[<path>] [--native-sysroot <path>]
+
+    This command copies local files or directories to the vfat or ext*
+    partitions of a partitioned image.
+
+ See 'wic help cp' for more detailed instructions.
+
+"""
+
+wic_cp_help = """
+
+NAME
+ wic cp - copy files and directories to the vfat or ext* partitions
+
+SYNOPSIS
+ wic cp <src> <image>:<partition>
+ wic cp <src> <image>:<partition><path>
+ wic cp <src> <image>:<partition><path> --native-sysroot <path>
+
+DESCRIPTION
+ This command copies files and directories to the vfat or ext* partition of
+ the partitioned image.
+
+    The first form copies a file or directory to the root directory of
+ the partition:
+ $ wic cp test.wks tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is DB4C-FD4C
+ Directory for ::/
+
+ efi <DIR> 2017-05-24 18:15
+ loader <DIR> 2017-05-24 18:15
+ startup nsh 26 2017-05-24 18:15
+ vmlinuz 6926384 2017-05-24 18:15
+ test wks 628 2017-05-24 21:22
+ 5 files 6 927 038 bytes
+ 15 677 440 bytes free
+
+    The second form copies a file or directory to the specified directory
+ on the partition:
+ $ wic cp test tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/efi/
+ $ wic ls tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/efi/
+ Volume in drive : is boot
+ Volume Serial Number is DB4C-FD4C
+ Directory for ::/efi
+
+ . <DIR> 2017-05-24 18:15
+ .. <DIR> 2017-05-24 18:15
+ boot <DIR> 2017-05-24 18:15
+ test <DIR> 2017-05-24 21:27
+ 4 files 0 bytes
+ 15 675 392 bytes free
+
+ The -n option is used to specify the path to the native sysroot
+    containing the tools (parted and mtools) to use.
+"""
+
+wic_rm_usage = """
+
+ Remove files or directories from the vfat or ext* partitions
+
+ usage: wic rm <image>:<partition><path> [--native-sysroot <path>]
+
+ This command removes files or directories from the vfat or ext* partitions of
+ the partitioned image.
+
+ See 'wic help rm' for more detailed instructions.
+
+"""
+
+wic_rm_help = """
+
+NAME
+ wic rm - remove files or directories from the vfat or ext* partitions
+
+SYNOPSIS
+ wic rm <src> <image>:<partition><path>
+ wic rm <src> <image>:<partition><path> --native-sysroot <path>
+
+DESCRIPTION
+ This command removes files or directories from the vfat or ext* partition of the
+ partitioned image:
+
+ $ wic ls ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is 11D0-DE21
+ Directory for ::/
+
+ libcom32 c32 186500 2017-06-02 15:15
+ libutil c32 24148 2017-06-02 15:15
+ syslinux cfg 209 2017-06-02 15:15
+ vesamenu c32 27104 2017-06-02 15:15
+ vmlinuz 6926384 2017-06-02 15:15
+ 5 files 7 164 345 bytes
+ 16 582 656 bytes free
+
+ $ wic rm ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/libutil.c32
+
+ $ wic ls ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1
+ Volume in drive : is boot
+ Volume Serial Number is 11D0-DE21
+ Directory for ::/
+
+ libcom32 c32 186500 2017-06-02 15:15
+ syslinux cfg 209 2017-06-02 15:15
+ vesamenu c32 27104 2017-06-02 15:15
+ vmlinuz 6926384 2017-06-02 15:15
+ 4 files 7 140 197 bytes
+ 16 607 232 bytes free
+
+ The -n option is used to specify the path to the native sysroot
+    containing the tools (parted and mtools) to use.
+"""
+
+wic_write_usage = """
+
+ Write image to a device
+
+ usage: wic write <image> <target device> [--expand [rules]] [--native-sysroot <path>]
+
+ This command writes partitioned image to a target device (USB stick, SD card etc).
+
+ See 'wic help write' for more detailed instructions.
+
+"""
+
+wic_write_help = """
+
+NAME
+ wic write - write an image to a device
+
+SYNOPSIS
+ wic write <image> <target>
+ wic write <image> <target> --expand auto
+ wic write <image> <target> --expand 1:100M-2:300M
+ wic write <image> <target> --native-sysroot <path>
+
+DESCRIPTION
+    This command writes an image to a target device (USB stick, SD card etc.)
+
+ $ wic write ./tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic /dev/sdb
+
+ The --expand option is used to resize image partitions.
+ --expand auto expands partitions to occupy all free space available on the target device.
+ It's also possible to specify expansion rules in a format
+ <partition>:<size>[-<partition>:<size>...] for one or more partitions.
+ Specifying size 0 will keep partition unmodified.
+    Note: Resizing the boot partition can result in a non-bootable image for
+    non-EFI images. It is recommended to use size 0 for the boot partition to
+    keep the image bootable.
+
+ The --native-sysroot option is used to specify the path to the native sysroot
+    containing the tools (parted, resize2fs) to use.
+"""
+
+wic_plugins_help = """
+
+NAME
+ wic plugins - Overview and API
+
+DESCRIPTION
+    Plugins allow wic functionality to be extended and specialized by
+ users. This section documents the plugin interface, which is
+ currently restricted to 'source' plugins.
+
+ 'Source' plugins provide a mechanism to customize various aspects
+ of the image generation process in wic, mainly the contents of
+ partitions.
+
+ Source plugins provide a mechanism for mapping values specified in
+ .wks files using the --source keyword to a particular plugin
+ implementation that populates a corresponding partition.
+
+ A source plugin is created as a subclass of SourcePlugin (see
+ scripts/lib/wic/pluginbase.py) and the plugin file containing it
+ is added to scripts/lib/wic/plugins/source/ to make the plugin
+ implementation available to the wic implementation.
+
+ Source plugins can also be implemented and added by external
+ layers - any plugins found in a scripts/lib/wic/plugins/source/
+ directory in an external layer will also be made available.
+
+ When the wic implementation needs to invoke a partition-specific
+ implementation, it looks for the plugin that has the same name as
+ the --source param given to that partition. For example, if the
+ partition is set up like this:
+
+ part /boot --source bootimg-pcbios ...
+
+ then the methods defined as class members of the plugin having the
+ matching bootimg-pcbios .name class member would be used.
+
+ To be more concrete, here's the plugin definition that would match
+ a '--source bootimg-pcbios' usage, along with an example method
+ that would be called by the wic implementation when it needed to
+ invoke an implementation-specific partition-preparation function:
+
+ class BootimgPcbiosPlugin(SourcePlugin):
+ name = 'bootimg-pcbios'
+
+ @classmethod
+        def do_prepare_partition(cls, part, ...)
+
+ If the subclass itself doesn't implement a function, a 'default'
+ version in a superclass will be located and used, which is why all
+ plugins must be derived from SourcePlugin.
+
+    The SourcePlugin class defines the following methods, which make up
+    the current set of methods that can be implemented/overridden by
+ --source plugins. Any methods not implemented by a SourcePlugin
+ subclass inherit the implementations present in the SourcePlugin
+ class (see the SourcePlugin source for details):
+
+ do_prepare_partition()
+ Called to do the actual content population for a
+ partition. In other words, it 'prepares' the final partition
+ image which will be incorporated into the disk image.
+
+ do_post_partition()
+ Called after the partition is created. It is useful to add post
+ operations e.g. signing the partition.
+
+ do_configure_partition()
+ Called before do_prepare_partition(), typically used to
+ create custom configuration files for a partition, for
+ example syslinux or grub config files.
+
+ do_install_disk()
+ Called after all partitions have been prepared and assembled
+ into a disk image. This provides a hook to allow
+ finalization of a disk image, for example to write an MBR to
+ it.
+
+ do_stage_partition()
+ Special content-staging hook called before
+ do_prepare_partition(), normally empty.
+
+ Typically, a partition will just use the passed-in
+ parameters, for example the unmodified value of bootimg_dir.
+ In some cases however, things may need to be more tailored.
+ As an example, certain files may additionally need to be
+          taken from bootimg_dir + /boot. This hook allows those files
+ to be staged in a customized fashion. Note that
+ get_bitbake_var() allows you to access non-standard
+ variables that you might want to use for these types of
+ situations.
+
+ This scheme is extensible - adding more hooks is a simple matter
+ of adding more plugin methods to SourcePlugin and derived classes.
+ Please see the implementation for details.
+"""
+
+wic_overview_help = """
+
+NAME
+ wic overview - General overview of wic
+
+DESCRIPTION
+ The 'wic' command generates partitioned images from existing
+ OpenEmbedded build artifacts. Image generation is driven by
+    partitioning commands contained in an 'OpenEmbedded kickstart'
+ (.wks) file (see 'wic help kickstart') specified either directly
+ on the command-line or as one of a selection of canned .wks files
+ (see 'wic list images'). When applied to a given set of build
+ artifacts, the result is an image or set of images that can be
+ directly written onto media and used on a particular system.
+
+    The 'wic' command and the infrastructure it's based on are by
+    definition incomplete - their purpose is to allow the generation
+    of customized images, and as such wic was designed to be completely
+    extensible via a plugin interface (see 'wic help plugins').
+
+ Background and Motivation
+
+    wic is meant to be a completely independent standalone utility
+    that initially provides easier-to-use and more flexible
+    replacements for a couple of bits of existing functionality in
+    oe-core: directdisk.bbclass and mkefidisk.sh. The difference
+    between wic and those examples is that with wic the functionality
+    of those scripts is implemented by a general-purpose partitioning
+    'language' based on Red Hat kickstart syntax.
+
+    The initial motivation and design considerations that led to the
+    current tool are described exhaustively in Yocto Bug #3847
+ (https://bugzilla.yoctoproject.org/show_bug.cgi?id=3847).
+
+ Implementation and Examples
+
+ wic can be used in two different modes, depending on how much
+    control the user needs in specifying the OpenEmbedded build
+ artifacts that will be used in creating the image: 'raw' and
+ 'cooked'.
+
+ If used in 'raw' mode, artifacts are explicitly specified via
+ command-line arguments (see example below).
+
+ The more easily usable 'cooked' mode uses the current MACHINE
+ setting and a specified image name to automatically locate the
+ artifacts used to create the image.
+
+ OE kickstart files (.wks) can of course be specified directly on
+ the command-line, but the user can also choose from a set of
+ 'canned' .wks files available via the 'wic list images' command
+ (example below).
+
+ In any case, the prerequisite for generating any image is to have
+ the build artifacts already available. The below examples assume
+    the user has already built a 'core-image-minimal' for a specific
+ machine (future versions won't require this redundant step, but
+ for now that's typically how build artifacts get generated).
+
+ The other prerequisite is to source the build environment:
+
+ $ source oe-init-build-env
+
+ To start out with, we'll generate an image from one of the canned
+    .wks files. The following generates a list of available
+ images:
+
+ $ wic list images
+ mkefidisk Create an EFI disk image
+ directdisk Create a 'pcbios' direct disk image
+
+ You can get more information about any of the available images by
+ typing 'wic list xxx help', where 'xxx' is one of the image names:
+
+ $ wic list mkefidisk help
+
+ Creates a partitioned EFI disk image that the user can directly dd
+ to boot media.
+
+ At any time, you can get help on the 'wic' command or any
+    subcommand (currently 'list' and 'create'). For instance, to get
+    the description of the 'wic create' command and its parameters:
+
+ $ wic create
+
+ Usage:
+
+ Create a new OpenEmbedded image
+
+ usage: wic create <wks file or image name> [-o <DIRNAME> | ...]
+ [-i <JSON PROPERTY FILE> | --infile <JSON PROPERTY_FILE>]
+ [-e | --image-name] [-s, --skip-build-check] [-D, --debug]
+ [-r, --rootfs-dir] [-b, --bootimg-dir] [-k, --kernel-dir]
+ [-n, --native-sysroot] [-f, --build-rootfs]
+
+ This command creates an OpenEmbedded image based on the 'OE
+ kickstart commands' found in the <wks file>.
+
+ The -o option can be used to place the image in a directory
+ with a different name and location.
+
+ See 'wic help create' for more detailed instructions.
+ ...
+
+ As mentioned in the command, you can get even more detailed
+ information by adding 'help' to the above:
+
+ $ wic help create
+
+ So, the easiest way to create an image is to use the -e option
+ with a canned .wks file. To use the -e option, you need to
+ specify the image used to generate the artifacts and you actually
+ need to have the MACHINE used to build them specified in your
+    local.conf (these requirements aren't necessary if you aren't
+    using the -e option.) Below, we generate a directdisk image,
+ pointing the process at the core-image-minimal artifacts for the
+ current MACHINE:
+
+ $ wic create directdisk -e core-image-minimal
+
+ Checking basic build environment...
+ Done.
+
+ Creating image(s)...
+
+ Info: The new image(s) can be found here:
+ /var/tmp/wic/build/directdisk-201309252350-sda.direct
+
+ The following build artifacts were used to create the image(s):
+
+ ROOTFS_DIR: ...
+ BOOTIMG_DIR: ...
+ KERNEL_DIR: ...
+ NATIVE_SYSROOT: ...
+
+ The image(s) were created using OE kickstart file:
+ .../scripts/lib/wic/canned-wks/directdisk.wks
+
+    The output shows the name and location of the image created and,
+    so that you know exactly what was used to generate the image, each
+    of the artifacts and the kickstart file used.
+
+ Similarly, you can create a 'mkefidisk' image in the same way
+ (notice that this example uses a different machine - because it's
+ using the -e option, you need to change the MACHINE in your
+ local.conf):
+
+ $ wic create mkefidisk -e core-image-minimal
+ Checking basic build environment...
+ Done.
+
+ Creating image(s)...
+
+ Info: The new image(s) can be found here:
+ /var/tmp/wic/build/mkefidisk-201309260027-sda.direct
+
+ ...
+
+ Here's an example that doesn't take the easy way out and manually
+ specifies each build artifact, along with a non-canned .wks file,
+ and also uses the -o option to have wic create the output
+ somewhere other than the default /var/tmp/wic:
+
+ $ wic create ./test.wks -o ./out --rootfs-dir
+ tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs
+ --bootimg-dir tmp/sysroots/qemux86-64/usr/share
+ --kernel-dir tmp/deploy/images/qemux86-64
+ --native-sysroot tmp/sysroots/x86_64-linux
+
+ Creating image(s)...
+
+ Info: The new image(s) can be found here:
+ out/build/test-201507211313-sda.direct
+
+ The following build artifacts were used to create the image(s):
+ ROOTFS_DIR: tmp/work/qemux86_64-poky-linux/core-image-minimal/1.0-r0/rootfs
+ BOOTIMG_DIR: tmp/sysroots/qemux86-64/usr/share
+ KERNEL_DIR: tmp/deploy/images/qemux86-64
+ NATIVE_SYSROOT: tmp/sysroots/x86_64-linux
+
+ The image(s) were created using OE kickstart file:
+ ./test.wks
+
+    Here are the contents of test.wks:
+
+ part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+ part / --source rootfs --ondisk sda --fstype=ext3 --label platform --align 1024
+
+ bootloader --timeout=0 --append="rootwait rootfstype=ext3 video=vesafb vga=0x318 console=tty0"
+
+
+ Finally, here's an example of the actual partition language
+    commands used to generate the mkefidisk image, i.e. these are the
+ contents of the mkefidisk.wks OE kickstart file:
+
+ # short-description: Create an EFI disk image
+ # long-description: Creates a partitioned EFI disk image that the user
+ # can directly dd to boot media.
+
+ part /boot --source bootimg-efi --ondisk sda --fstype=efi --active
+
+ part / --source rootfs --ondisk sda --fstype=ext3 --label platform
+
+ part swap --ondisk sda --size 44 --label swap1 --fstype=swap
+
+ bootloader --timeout=10 --append="rootwait console=ttyPCH0,115200"
+
+ You can get a complete listing and description of all the
+ kickstart commands available for use in .wks files from 'wic help
+ kickstart'.
+"""
+
+wic_kickstart_help = """
+
+NAME
+ wic kickstart - wic kickstart reference
+
+DESCRIPTION
+ This section provides the definitive reference to the wic
+ kickstart language. It also provides documentation on the list of
+ --source plugins available for use from the 'part' command (see
+ the 'Platform-specific Plugins' section below).
+
+    The current wic implementation supports the basic kickstart
+    partitioning commands partition (or part for short) and
+    bootloader, plus a wic-specific include command.
+
+ The following is a listing of the commands, their syntax, and
+ meanings. The commands are based on the Fedora kickstart
+ documentation but with modifications to reflect wic capabilities.
+
+ http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition
+ http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader
+
+ Commands
+
+ * 'part' or 'partition'
+
+ This command creates a partition on the system and uses the
+ following syntax:
+
+ part [<mountpoint>]
+
+      The <mountpoint> is where the partition will be mounted and
+      must take one of the following forms:
+
+ /<path>: For example: /, /usr, or /home
+
+ swap: The partition will be used as swap space.
+
+      If a <mountpoint> is not specified, the partition will be created
+      but will not be mounted.
+
+ Partitions with a <mountpoint> specified will be automatically mounted.
+ This is achieved by wic adding entries to the fstab during image
+ generation. In order for a valid fstab to be generated one of the
+ --ondrive, --ondisk or --use-uuid partition options must be used for
+ each partition that specifies a mountpoint. Note that with --use-uuid
+ and non-root <mountpoint>, including swap, the mount program must
+ understand the PARTUUID syntax. This currently excludes the busybox
+ versions of these applications.
+
+
+ The following are supported 'part' options:
+
+ --size: The minimum partition size. Specify an integer value
+                 such as 500. Multipliers k, M and G can be used. If
+                 no multiplier is specified, the size is in MB.
+ You do not need this option if you use --source.
+
+ --fixed-size: Exact partition size. Value format is the same
+                       as for the --size option. This option cannot be
+                       specified along with --size. If the partition data
+                       is larger than --fixed-size, an error will be
+                       raised when assembling the disk image.
+
+ --source: This option is a wic-specific option that names the
+ source of the data that will populate the
+ partition. The most common value for this option
+                   is 'rootfs', but it can be any value that maps to a
+ valid 'source plugin' (see 'wic help plugins').
+
+ If '--source rootfs' is used, it tells the wic
+ command to create a partition as large as needed
+ and to fill it with the contents of the root
+ filesystem pointed to by the '-r' wic command-line
+ option (or the equivalent rootfs derived from the
+ '-e' command-line option). The filesystem type
+ that will be used to create the partition is driven
+ by the value of the --fstype option specified for
+ the partition (see --fstype below).
+
+                   If '--source <plugin-name>' is used, it tells the
+                   wic command to create a partition as large as
+                   needed and to fill it with the contents of the
+                   partition that will be generated by the specified
+                   plugin name using the data pointed to by the '-r'
+                   wic command-line option (or the equivalent rootfs
+                   derived from the '-e' command-line option).
+                   Exactly what those contents and filesystem type end
+                   up being depends on the given plugin
+                   implementation.
+
+                   If the --source option is not used, the wic command
+                   will create an empty partition. The --size parameter
+                   must then be used to specify the size of the empty
+                   partition.
+
+ --ondisk or --ondrive: Forces the partition to be created on
+ a particular disk.
+
+ --fstype: Sets the file system type for the partition. These
+ apply to partitions created using '--source rootfs' (see
+ --source above). Valid values are:
+
+ vfat
+ msdos
+ ext2
+ ext3
+ ext4
+ btrfs
+ squashfs
+ swap
+
+ --fsoptions: Specifies a free-form string of options to be
+ used when mounting the filesystem. This string
+ will be copied into the /etc/fstab file of the
+ installed system and should be enclosed in
+ quotes. If not specified, the default string is
+ "defaults".
+
+ --label label: Specifies the label to give to the filesystem
+ to be made on the partition. If the given
+ label is already in use by another filesystem,
+ a new label is created for the partition.
+
+ --active: Marks the partition as active.
+
+ --align (in KBytes): This option is specific to wic and says
+ to start a partition on an x KBytes
+ boundary.
+
+ --no-table: This option is specific to wic. Space will be
+ reserved for the partition and it will be
+ populated but it will not be added to the
+ partition table. It may be useful for
+ bootloaders.
+
+ --exclude-path: This option is specific to wic. It excludes the given
+ relative path from the resulting image. If the path
+ ends with a slash, only the content of the directory
+ is omitted, not the directory itself. This option only
+ has an effect with the rootfs source plugin.
+
+ --extra-space: This option is specific to wic. It adds extra
+ space after the space filled by the content
+ of the partition. The final size can go
+ beyond the size specified by --size.
+ By default, 10MB. This option cannot be used
+ with --fixed-size option.
+
+ --overhead-factor: This option is specific to wic. The
+ size of the partition is multiplied by
+ this factor. It has to be greater than or
+ equal to 1. The default value is 1.3.
+ This option cannot be used with --fixed-size
+ option.
+
+         --part-name: This option is specific to wic. It specifies the
+                      name for a GPT partition.
+
+         --part-type: This option is specific to wic. It specifies the
+                      partition type GUID for GPT partitions.
+                      A list of partition type GUIDs can be found here:
+                      http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs
+
+         --use-uuid: This option is specific to wic. It makes wic generate
+                     a random globally unique identifier (GUID) for the partition
+                     and use it in the bootloader configuration to specify the root partition.
+
+         --uuid: This option is specific to wic. It specifies the partition UUID.
+                 It's useful if a preconfigured partition UUID is added to the kernel
+                 command line in the bootloader configuration before running wic. In
+                 this case the .wks file can be generated or modified to set the
+                 preconfigured partition UUID using this option.
+
+         --fsuuid: This option is specific to wic. It specifies the filesystem UUID.
+                   It's useful if a preconfigured filesystem UUID is added to the kernel
+                   command line in the bootloader configuration before running wic. In
+                   this case the .wks file can be generated or modified to set the
+                   preconfigured filesystem UUID using this option.
+
+         --system-id: This option is specific to wic. It specifies the partition
+                      system id, which is useful for hardware that requires
+                      non-default partition system ids. The parameter is a one-byte
+                      hex number, with or without the 0x prefix.
+
+         --mkfs-extraopts: This option specifies extra options to pass to the
+                  mkfs utility. Note that wic uses default options for some
+                  filesystems, for example '-S 512' for mkfs.fat or '-F -i 8192'
+                  for mkfs.ext. Those defaults will not take effect when
+                  --mkfs-extraopts is used; this should be taken into account
+                  when using --mkfs-extraopts.
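+
+         For example, a hypothetical 'part' line combining several of the
+         options above (the disk and label names are illustrative):
+
+             part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024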
+
+ * bootloader
+
+ This command allows the user to specify various bootloader
+ options. The following are supported 'bootloader' options:
+
+ --timeout: Specifies the number of seconds before the
+ bootloader times out and boots the default option.
+
+      --append: Specifies kernel parameters. These will be added to the
+                bootloader command line - for example, the syslinux
+                APPEND or grub kernel command line.
+
+      --configfile: Specifies a user-defined configuration file for
+                    the bootloader. This file must be located in the
+                    canned-wks folder or be given as a full path to the
+                    file. Using this option will override any other
+                    bootloader option.
+
+ Note that bootloader functionality and boot partitions are
+ implemented by the various --source plugins that implement
+ bootloader functionality; the bootloader command essentially
+ provides a means of modifying bootloader configuration.
+
+ * include
+
+    This command allows the user to include the contents of another
+    .wks file in the original .wks file.
+
+    The command uses the following syntax:
+
+ include <file>
+
+    The <file> is either a path to the file or its name. If a name is
+    specified, wic will try to find the file in the directories with
+    canned .wks files.
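+
+    For example, assuming a canned or local file named common.wks
+    exists, a .wks file could pull it in with:
+
+        include common.wks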
+
+"""
+
+wic_help_help = """
+NAME
+ wic help - display a help topic
+
+DESCRIPTION
+ Specify a help topic to display it. Topics are shown above.
+"""
diff --git a/poky/scripts/lib/wic/ksparser.py b/poky/scripts/lib/wic/ksparser.py
new file mode 100644
index 000000000..e590b2fe3
--- /dev/null
+++ b/poky/scripts/lib/wic/ksparser.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python -tt
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2016 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+# This module provides parser for kickstart format
+#
+# AUTHORS
+#    Ed Bartosh <ed.bartosh (at] linux.intel.com>
+
+"""Kickstart parser module."""
+
+import os
+import shlex
+import logging
+
+from argparse import ArgumentParser, ArgumentError, ArgumentTypeError
+
+from wic.engine import find_canned
+from wic.partition import Partition
+
+logger = logging.getLogger('wic')
+
+class KickStartError(Exception):
+ """Custom exception."""
+ pass
+
+class KickStartParser(ArgumentParser):
+ """
+    This class overrides the error method to raise an exception
+    instead of producing a usage message (the default argparse behavior).
+ """
+ def error(self, message):
+ raise ArgumentError(None, message)
+
+def sizetype(arg):
+ """
+ Custom type for ArgumentParser
+    Converts a size string in <num>[K|k|M|G] format into an integer
+    value in kilobytes (a bare number is interpreted as MB)
+ """
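+    # Examples of the conversion (all results are in kB):
+    #   "10" -> 10240 (bare numbers are MB), "10k" -> 10,
+    #   "10M" -> 10240, "1G" -> 1048576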
+ if arg.isdigit():
+ return int(arg) * 1024
+
+ if not arg[:-1].isdigit():
+ raise ArgumentTypeError("Invalid size: %r" % arg)
+
+ size = int(arg[:-1])
+ if arg.endswith("k") or arg.endswith("K"):
+ return size
+ if arg.endswith("M"):
+ return size * 1024
+ if arg.endswith("G"):
+ return size * 1024 * 1024
+
+ raise ArgumentTypeError("Invalid size: %r" % arg)
+
+def overheadtype(arg):
+ """
+ Custom type for ArgumentParser
+    Converts overhead string to float and checks that it's at least 1.0
+ """
+ try:
+ result = float(arg)
+ except ValueError:
+ raise ArgumentTypeError("Invalid value: %r" % arg)
+
+ if result < 1.0:
+        raise ArgumentTypeError("Overhead factor should be >= 1.0: %r" % arg)
+
+ return result
+
+def cannedpathtype(arg):
+ """
+ Custom type for ArgumentParser
+ Tries to find file in the list of canned wks paths
+ """
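+    # note: the plain string concatenation below (rather than os.path.join)
+    # is what makes this normalize to the top-level scripts directory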
+ scripts_path = os.path.abspath(os.path.dirname(__file__) + '../../..')
+ result = find_canned(scripts_path, arg)
+ if not result:
+ raise ArgumentTypeError("file not found: %s" % arg)
+ return result
+
+def systemidtype(arg):
+ """
+ Custom type for ArgumentParser
+    Checks if the argument satisfies system id requirements,
+    i.e. if it's a one-byte integer > 0
+ """
+    error = "Invalid system type: %s. Must be a hex "\
+            "number between 0x1 and 0xFF" % arg
+ try:
+ result = int(arg, 16)
+ except ValueError:
+ raise ArgumentTypeError(error)
+
+ if result <= 0 or result > 0xff:
+ raise ArgumentTypeError(error)
+
+ return arg
+
+class KickStart():
+ """Kickstart parser implementation."""
+
+ DEFAULT_EXTRA_SPACE = 10*1024
+ DEFAULT_OVERHEAD_FACTOR = 1.3
+
+ def __init__(self, confpath):
+
+ self.partitions = []
+ self.bootloader = None
+ self.lineno = 0
+ self.partnum = 0
+
+ parser = KickStartParser()
+ subparsers = parser.add_subparsers()
+
+ part = subparsers.add_parser('part')
+ part.add_argument('mountpoint', nargs='?')
+ part.add_argument('--active', action='store_true')
+ part.add_argument('--align', type=int)
+ part.add_argument('--exclude-path', nargs='+')
+ part.add_argument("--extra-space", type=sizetype)
+ part.add_argument('--fsoptions', dest='fsopts')
+ part.add_argument('--fstype', default='vfat',
+ choices=('ext2', 'ext3', 'ext4', 'btrfs',
+ 'squashfs', 'vfat', 'msdos', 'swap'))
+ part.add_argument('--mkfs-extraopts', default='')
+ part.add_argument('--label')
+ part.add_argument('--no-table', action='store_true')
+ part.add_argument('--ondisk', '--ondrive', dest='disk', default='sda')
+ part.add_argument("--overhead-factor", type=overheadtype)
+ part.add_argument('--part-name')
+ part.add_argument('--part-type')
+ part.add_argument('--rootfs-dir')
+
+        # --size and --fixed-size cannot be specified together; options
+        # --extra-space and --overhead-factor should also raise a parser
+        # error, but since nesting mutually exclusive groups does not work,
+        # --extra-space/--overhead-factor are handled later
+ sizeexcl = part.add_mutually_exclusive_group()
+ sizeexcl.add_argument('--size', type=sizetype, default=0)
+ sizeexcl.add_argument('--fixed-size', type=sizetype, default=0)
+
+ part.add_argument('--source')
+ part.add_argument('--sourceparams')
+ part.add_argument('--system-id', type=systemidtype)
+ part.add_argument('--use-uuid', action='store_true')
+ part.add_argument('--uuid')
+ part.add_argument('--fsuuid')
+
+ bootloader = subparsers.add_parser('bootloader')
+ bootloader.add_argument('--append')
+ bootloader.add_argument('--configfile')
+ bootloader.add_argument('--ptable', choices=('msdos', 'gpt'),
+ default='msdos')
+ bootloader.add_argument('--timeout', type=int)
+ bootloader.add_argument('--source')
+
+ include = subparsers.add_parser('include')
+ include.add_argument('path', type=cannedpathtype)
+
+ self._parse(parser, confpath)
+ if not self.bootloader:
+ logger.warning('bootloader config not specified, using defaults\n')
+ self.bootloader = bootloader.parse_args([])
+
+ def _parse(self, parser, confpath):
+ """
+ Parse file in .wks format using provided parser.
+ """
+ with open(confpath) as conf:
+ lineno = 0
+ for line in conf:
+ line = line.strip()
+ lineno += 1
+ if line and line[0] != '#':
+ try:
+ line_args = shlex.split(line)
+ parsed = parser.parse_args(line_args)
+ except ArgumentError as err:
+ raise KickStartError('%s:%d: %s' % \
+ (confpath, lineno, err))
+ if line.startswith('part'):
+ # SquashFS does not support UUID
+ if parsed.fstype == 'squashfs' and parsed.use_uuid:
+ err = "%s:%d: SquashFS does not support UUID" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+                    # when using ArgumentParser one cannot easily tell
+                    # whether an option was passed on the command line if
+                    # that option has a default value; --overhead-factor/
+                    # --extra-space cannot be used with --fixed-size, so at
+                    # least detect when these were passed with non-0 values ...
+ if parsed.fixed_size:
+ if parsed.overhead_factor or parsed.extra_space:
+ err = "%s:%d: arguments --overhead-factor and --extra-space not "\
+ "allowed with argument --fixed-size" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+ else:
+ # ... and provide defaults if not using
+ # --fixed-size iff given option was not used
+ # (again, one cannot tell if option was passed but
+ # with value equal to 0)
+ if '--overhead-factor' not in line_args:
+ parsed.overhead_factor = self.DEFAULT_OVERHEAD_FACTOR
+ if '--extra-space' not in line_args:
+ parsed.extra_space = self.DEFAULT_EXTRA_SPACE
+
+ self.partnum += 1
+ self.partitions.append(Partition(parsed, self.partnum))
+ elif line.startswith('include'):
+ self._parse(parser, parsed.path)
+ elif line.startswith('bootloader'):
+ if not self.bootloader:
+ self.bootloader = parsed
+ else:
+ err = "%s:%d: more than one bootloader specified" \
+ % (confpath, lineno)
+ raise KickStartError(err)
diff --git a/poky/scripts/lib/wic/misc.py b/poky/scripts/lib/wic/misc.py
new file mode 100644
index 000000000..ee888b478
--- /dev/null
+++ b/poky/scripts/lib/wic/misc.py
@@ -0,0 +1,263 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module provides a place to collect various wic-related utils
+# for the OpenEmbedded Image Tools.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+"""Miscellaneous functions."""
+
+import logging
+import os
+import re
+import subprocess
+
+from collections import defaultdict
+from distutils import spawn
+
+from wic import WicError
+
+logger = logging.getLogger('wic')
+
+# executable -> recipe pairs for exec_native_cmd
+NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+ "grub-mkimage": "grub-efi",
+ "isohybrid": "syslinux",
+ "mcopy": "mtools",
+ "mdel" : "mtools",
+ "mdeltree" : "mtools",
+ "mdir" : "mtools",
+ "mkdosfs": "dosfstools",
+ "mkisofs": "cdrtools",
+ "mkfs.btrfs": "btrfs-tools",
+ "mkfs.ext2": "e2fsprogs",
+ "mkfs.ext3": "e2fsprogs",
+ "mkfs.ext4": "e2fsprogs",
+ "mkfs.vfat": "dosfstools",
+ "mksquashfs": "squashfs-tools",
+ "mkswap": "util-linux",
+ "mmd": "mtools",
+ "parted": "parted",
+ "sfdisk": "util-linux",
+ "sgdisk": "gptfdisk",
+ "syslinux": "syslinux"
+ }
+
+def runtool(cmdln_or_args):
+    """ Wrapper for most of the subprocess calls.
+    input:
+        cmdln_or_args: either an argument list or a command-line
+                       string (the latter is run with shell=True)
+ return:
+ rc, output
+ """
+ if isinstance(cmdln_or_args, list):
+ cmd = cmdln_or_args[0]
+ shell = False
+ else:
+ import shlex
+ cmd = shlex.split(cmdln_or_args)[0]
+ shell = True
+
+ sout = subprocess.PIPE
+ serr = subprocess.STDOUT
+
+ try:
+ process = subprocess.Popen(cmdln_or_args, stdout=sout,
+ stderr=serr, shell=shell)
+ sout, serr = process.communicate()
+ # combine stdout and stderr, filter None out and decode
+ out = ''.join([out.decode('utf-8') for out in [sout, serr] if out])
+ except OSError as err:
+ if err.errno == 2:
+ # [Errno 2] No such file or directory
+ raise WicError('Cannot run command: %s, lost dependency?' % cmd)
+ else:
+ raise # relay
+
+ return process.returncode, out
+
+def _exec_cmd(cmd_and_args, as_shell=False):
+ """
+ Execute command, catching stderr, stdout
+
+ Need to execute as_shell if the command uses wildcards
+ """
+ logger.debug("_exec_cmd: %s", cmd_and_args)
+ args = cmd_and_args.split()
+ logger.debug(args)
+
+ if as_shell:
+ ret, out = runtool(cmd_and_args)
+ else:
+ ret, out = runtool(args)
+ out = out.strip()
+ if ret != 0:
+ raise WicError("_exec_cmd: %s returned '%s' instead of 0\noutput: %s" % \
+ (cmd_and_args, ret, out))
+
+ logger.debug("_exec_cmd: output for %s (rc = %d): %s",
+ cmd_and_args, ret, out)
+
+ return ret, out
+
+
+def exec_cmd(cmd_and_args, as_shell=False):
+ """
+ Execute command, return output
+ """
+ return _exec_cmd(cmd_and_args, as_shell)[1]
+
+
+def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
+ """
+ Execute native command, catching stderr, stdout
+
+    Native commands are always executed as shell commands, which is
+    needed both for wildcard expansion and for the PATH export below
+ """
+    # use the last ';'-separated segment, as the command may be
+    # preceded by "export" statements
+ args = cmd_and_args.split(';')[-1].split()
+ logger.debug(args)
+
+ if pseudo:
+ cmd_and_args = pseudo + cmd_and_args
+
+ native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
+ (native_sysroot, native_sysroot, native_sysroot)
+
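+    # the command eventually run looks like, e.g.:
+    # "export PATH=<sysroot>/sbin:<sysroot>/usr/sbin:<sysroot>/usr/bin:$PATH;parted ..."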
+ native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
+ (native_paths, cmd_and_args)
+ logger.debug("exec_native_cmd: %s", native_cmd_and_args)
+
+ # If the command isn't in the native sysroot say we failed.
+ if spawn.find_executable(args[0], native_paths):
+ ret, out = _exec_cmd(native_cmd_and_args, True)
+ else:
+ ret = 127
+ out = "can't find native executable %s in %s" % (args[0], native_paths)
+
+ prog = args[0]
+ # shell command-not-found
+ if ret == 127 \
+ or (pseudo and ret == 1 and out == "Can't find '%s' in $PATH." % prog):
+ msg = "A native program %s required to build the image "\
+ "was not found (see details above).\n\n" % prog
+ recipe = NATIVE_RECIPES.get(prog)
+ if recipe:
+            msg += "Please make sure wic-tools has %s-native in its DEPENDS, "\
+ "build it with 'bitbake wic-tools' and try again.\n" % recipe
+ else:
+ msg += "Wic failed to find a recipe to build native %s. Please "\
+ "file a bug against wic.\n" % prog
+ raise WicError(msg)
+
+ return ret, out
+
+BOOTDD_EXTRA_SPACE = 16384
+
+class BitbakeVars(defaultdict):
+ """
+ Container for Bitbake variables.
+ """
+ def __init__(self):
+ defaultdict.__init__(self, dict)
+
+ # default_image and vars_dir attributes should be set from outside
+ self.default_image = None
+ self.vars_dir = None
+
+ def _parse_line(self, line, image, matcher=re.compile(r"^([a-zA-Z0-9\-_+./~]+)=(.*)")):
+ """
+ Parse one line from bitbake -e output or from .env file.
+ Put result key-value pair into the storage.
+ """
+ if "=" not in line:
+ return
+ match = matcher.match(line)
+ if not match:
+ return
+ key, val = match.groups()
+ self[image][key] = val.strip('"')
+
+ def get_var(self, var, image=None, cache=True):
+ """
+ Get bitbake variable from 'bitbake -e' output or from .env file.
+        This is a lazy method, i.e. it runs bitbake or parses the file
+        only when a variable is requested. It also caches results.
+ """
+ if not image:
+ image = self.default_image
+
+ if image not in self:
+ if image and self.vars_dir:
+ fname = os.path.join(self.vars_dir, image + '.env')
+ if os.path.isfile(fname):
+ # parse .env file
+ with open(fname) as varsfile:
+ for line in varsfile:
+ self._parse_line(line, image)
+ else:
+ print("Couldn't get bitbake variable from %s." % fname)
+ print("File %s doesn't exist." % fname)
+ return
+ else:
+ # Get bitbake -e output
+ cmd = "bitbake -e"
+ if image:
+ cmd += " %s" % image
+
+ log_level = logger.getEffectiveLevel()
+ logger.setLevel(logging.INFO)
+ ret, lines = _exec_cmd(cmd)
+ logger.setLevel(log_level)
+
+ if ret:
+ logger.error("Couldn't get '%s' output.", cmd)
+ logger.error("Bitbake failed with error:\n%s\n", lines)
+ return
+
+ # Parse bitbake -e output
+ for line in lines.split('\n'):
+ self._parse_line(line, image)
+
+ # Make first image a default set of variables
+ if cache:
+ images = [key for key in self if key]
+ if len(images) == 1:
+ self[None] = self[image]
+
+ result = self[image].get(var)
+ if not cache:
+ self.pop(image, None)
+
+ return result
+
+# Create BB_VARS singleton
+BB_VARS = BitbakeVars()
+
+def get_bitbake_var(var, image=None, cache=True):
+ """
+ Provide old get_bitbake_var API by wrapping
+ get_var method of BB_VARS singleton.
+ """
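+    # e.g. get_bitbake_var("ROOTFS_SIZE") or, scoped to one image,
+    # get_bitbake_var("ROOTFS_SIZE", "core-image-minimal")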
+ return BB_VARS.get_var(var, image, cache)
diff --git a/poky/scripts/lib/wic/partition.py b/poky/scripts/lib/wic/partition.py
new file mode 100644
index 000000000..3fe5c4e26
--- /dev/null
+++ b/poky/scripts/lib/wic/partition.py
@@ -0,0 +1,425 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013-2016 Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module provides the OpenEmbedded partition object definitions.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#    Ed Bartosh <ed.bartosh (at] linux.intel.com>
+
+import logging
+import os
+import uuid
+
+from wic import WicError
+from wic.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+from wic.pluginbase import PluginMgr
+
+logger = logging.getLogger('wic')
+
+class Partition():
+
+ def __init__(self, args, lineno):
+ self.args = args
+ self.active = args.active
+ self.align = args.align
+ self.disk = args.disk
+ self.device = None
+ self.extra_space = args.extra_space
+ self.exclude_path = args.exclude_path
+ self.fsopts = args.fsopts
+ self.fstype = args.fstype
+ self.label = args.label
+ self.mkfs_extraopts = args.mkfs_extraopts
+ self.mountpoint = args.mountpoint
+ self.no_table = args.no_table
+ self.num = None
+ self.overhead_factor = args.overhead_factor
+ self.part_name = args.part_name
+ self.part_type = args.part_type
+ self.rootfs_dir = args.rootfs_dir
+ self.size = args.size
+ self.fixed_size = args.fixed_size
+ self.source = args.source
+ self.sourceparams = args.sourceparams
+ self.system_id = args.system_id
+ self.use_uuid = args.use_uuid
+ self.uuid = args.uuid
+ self.fsuuid = args.fsuuid
+
+ self.lineno = lineno
+ self.source_file = ""
+ self.sourceparams_dict = {}
+
+ def get_extra_block_count(self, current_blocks):
+ """
+        The --size param is reflected in self.size (in kB). Given that
+        we already have current_blocks (1k) blocks, calculate and return
+        the number of (1k) blocks we need to add to get to --size;
+        return 0 if we're already there or beyond.
+ """
+ logger.debug("Requested partition size for %s: %d",
+ self.mountpoint, self.size)
+
+ if not self.size:
+ return 0
+
+ requested_blocks = self.size
+
+ logger.debug("Requested blocks %d, current_blocks %d",
+ requested_blocks, current_blocks)
+
+ if requested_blocks > current_blocks:
+ return requested_blocks - current_blocks
+ else:
+ return 0
+
+ def get_rootfs_size(self, actual_rootfs_size=0):
+ """
+ Calculate the required size of rootfs taking into consideration
+ --size/--fixed-size flags as well as overhead and extra space, as
+ specified in kickstart file. Raises an error if the
+ `actual_rootfs_size` is larger than fixed-size rootfs.
+
+ """
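+        # worked example (assumed numbers): with actual_rootfs_size of
+        # 100000 kB, the default --extra-space of 10240 kB and the default
+        # --overhead-factor of 1.3, this returns (100000 + 10240) * 1.3,
+        # i.e. 143312 kB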
+ if self.fixed_size:
+ rootfs_size = self.fixed_size
+ if actual_rootfs_size > rootfs_size:
+ raise WicError("Actual rootfs size (%d kB) is larger than "
+ "allowed size %d kB" %
+ (actual_rootfs_size, rootfs_size))
+ else:
+ extra_blocks = self.get_extra_block_count(actual_rootfs_size)
+ if extra_blocks < self.extra_space:
+ extra_blocks = self.extra_space
+
+ rootfs_size = actual_rootfs_size + extra_blocks
+ rootfs_size *= self.overhead_factor
+
+ logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+ extra_blocks, self.mountpoint, rootfs_size)
+
+ return rootfs_size
+
+ @property
+ def disk_size(self):
+ """
+ Obtain on-disk size of partition taking into consideration
+ --size/--fixed-size options.
+
+ """
+ return self.fixed_size if self.fixed_size else self.size
+
+ def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Prepare content for individual partitions, depending on
+ partition command parameters.
+ """
+ if not self.source:
+ if not self.size and not self.fixed_size:
+ raise WicError("The %s partition has a size of zero. Please "
+ "specify a non-zero --size/--fixed-size for that "
+ "partition." % self.mountpoint)
+
+ if self.fstype == "swap":
+ self.prepare_swap_partition(cr_workdir, oe_builddir,
+ native_sysroot)
+ self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
+ else:
+ if self.fstype == 'squashfs':
+                    raise WicError("It's not possible to create an empty squashfs "
+                                   "partition '%s'" % (self.mountpoint))
+
+ rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
+ self.lineno, self.fstype)
+ if os.path.isfile(rootfs):
+ os.remove(rootfs)
+
+ prefix = "ext" if self.fstype.startswith("ext") else self.fstype
+ method = getattr(self, "prepare_empty_partition_" + prefix)
+ method(rootfs, oe_builddir, native_sysroot)
+ self.source_file = rootfs
+ return
+
+ plugins = PluginMgr.get_plugins('source')
+
+ if self.source not in plugins:
+ raise WicError("The '%s' --source specified for %s doesn't exist.\n\t"
+ "See 'wic list source-plugins' for a list of available"
+ " --sources.\n\tSee 'wic help source-plugins' for "
+ "details on adding a new source plugin." %
+ (self.source, self.mountpoint))
+
+ srcparams_dict = {}
+ if self.sourceparams:
+ # Split sourceparams string of the form key1=val1[,key2=val2,...]
+ # into a dict. Also accepts valueless keys i.e. without =
+ splitted = self.sourceparams.split(',')
+ srcparams_dict = dict(par.split('=') for par in splitted if par)
+
+ plugin = PluginMgr.get_plugins('source')[self.source]
+ plugin.do_configure_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, native_sysroot)
+ plugin.do_stage_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, native_sysroot)
+ plugin.do_prepare_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, rootfs_dir, native_sysroot)
+ plugin.do_post_partition(self, srcparams_dict, creator,
+ cr_workdir, oe_builddir, bootimg_dir,
+ kernel_dir, rootfs_dir, native_sysroot)
+
+        # further processing requires Partition.size to be an integer;
+        # make sure that it is one
+ if not isinstance(self.size, int):
+ raise WicError("Partition %s internal size is not an integer. "
+                           "This is a bug in source plugin %s and needs to be fixed." %
+ (self.mountpoint, self.source))
+
+ if self.fixed_size and self.size > self.fixed_size:
+ raise WicError("File system image of partition %s is "
+ "larger (%d kB) than its allowed size %d kB" %
+ (self.mountpoint, self.size, self.fixed_size))
+
+ def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
+                       native_sysroot, real_rootfs=True):
+ """
+ Prepare content for a rootfs partition i.e. create a partition
+ and fill it from a /rootfs dir.
+
+ Currently handles ext2/3/4, btrfs, vfat and squashfs.
+ """
+ p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
+ p_localstatedir = os.environ.get("PSEUDO_LOCALSTATEDIR",
+ "%s/../pseudo" % get_bitbake_var("IMAGE_ROOTFS"))
+ p_passwd = os.environ.get("PSEUDO_PASSWD", rootfs_dir)
+ p_nosymlinkexp = os.environ.get("PSEUDO_NOSYMLINKEXP", "1")
+ pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir
+ pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd
+ pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
+
+ rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
+ self.lineno, self.fstype)
+ if os.path.isfile(rootfs):
+ os.remove(rootfs)
+
+ # Get rootfs size from bitbake variable if it's not set in .ks file
+ if not self.size and real_rootfs:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ rsize_bb = get_bitbake_var('ROOTFS_SIZE')
+ if rsize_bb:
+ logger.warning('overhead-factor was specified, but size was not,'
+ ' so bitbake variables will be used for the size.'
+ ' In this case both IMAGE_OVERHEAD_FACTOR and '
+ '--overhead-factor will be applied')
+ self.size = int(round(float(rsize_bb)))
+
+ prefix = "ext" if self.fstype.startswith("ext") else self.fstype
+ method = getattr(self, "prepare_rootfs_" + prefix)
+ method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+ self.source_file = rootfs
+
+ # get the rootfs size in the right units for kickstart (kB)
+ du_cmd = "du -Lbks %s" % rootfs
+ out = exec_cmd(du_cmd)
+ self.size = int(out.split()[0])
+
+ def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for an ext2/3/4 rootfs partition.
+ """
+ du_cmd = "du -ks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ actual_rootfs_size = int(out.split()[0])
+
+ rootfs_size = self.get_rootfs_size(actual_rootfs_size)
+
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), rootfs_size * 1024)
+
+ extraopts = self.mkfs_extraopts or "-F -i 8192"
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkfs_cmd = "mkfs.%s %s %s %s -U %s -d %s" % \
+ (self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir)
+ exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+
+        fsck_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
+        exec_native_cmd(fsck_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for a btrfs rootfs partition.
+ """
+ du_cmd = "du -ks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ actual_rootfs_size = int(out.split()[0])
+
+ rootfs_size = self.get_rootfs_size(actual_rootfs_size)
+
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), rootfs_size * 1024)
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkfs_cmd = "mkfs.%s -b %d -r %s %s %s -U %s %s" % \
+ (self.fstype, rootfs_size * 1024, rootfs_dir, label_str,
+ self.mkfs_extraopts, self.fsuuid, rootfs)
+ exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for a msdos/vfat rootfs partition.
+ """
+ du_cmd = "du -bks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+
+ rootfs_size = self.get_rootfs_size(blocks)
+
+ label_str = "-n boot"
+ if self.label:
+ label_str = "-n %s" % self.label
+
+ size_str = ""
+ if self.fstype == 'msdos':
+ size_str = "-F 16" # FAT 16
+
+ extraopts = self.mkfs_extraopts or '-S 512'
+
+ dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
+ (label_str, self.fsuuid, size_str, extraopts, rootfs,
+ max(8250, rootfs_size))
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % rootfs
+ exec_cmd(chmod_cmd)
+
+ prepare_rootfs_vfat = prepare_rootfs_msdos
+
+ def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for a squashfs rootfs partition.
+ """
+ extraopts = self.mkfs_extraopts or '-noappend'
+ squashfs_cmd = "mksquashfs %s %s %s" % \
+ (rootfs_dir, rootfs, extraopts)
+ exec_native_cmd(squashfs_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_empty_partition_ext(self, rootfs, oe_builddir,
+ native_sysroot):
+ """
+ Prepare an empty ext2/3/4 partition.
+ """
+ size = self.disk_size
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), size * 1024)
+
+ extraopts = self.mkfs_extraopts or "-i 8192"
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkfs_cmd = "mkfs.%s -F %s %s -U %s %s" % \
+ (self.fstype, extraopts, label_str, self.fsuuid, rootfs)
+ exec_native_cmd(mkfs_cmd, native_sysroot)
+
+ def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
+ native_sysroot):
+ """
+ Prepare an empty btrfs partition.
+ """
+ size = self.disk_size
+ with open(rootfs, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), size * 1024)
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+        # use the local 'size' (which honors --fixed-size) rather than
+        # self.size, which is 0 when --fixed-size is given
+        mkfs_cmd = "mkfs.%s -b %d %s -U %s %s %s" % \
+                   (self.fstype, size * 1024, label_str, self.fsuuid,
+                    self.mkfs_extraopts, rootfs)
+ exec_native_cmd(mkfs_cmd, native_sysroot)
+
+ def prepare_empty_partition_msdos(self, rootfs, oe_builddir,
+ native_sysroot):
+ """
+        Prepare an empty msdos/vfat partition.
+ """
+ blocks = self.disk_size
+
+ label_str = "-n boot"
+ if self.label:
+ label_str = "-n %s" % self.label
+
+ size_str = ""
+ if self.fstype == 'msdos':
+ size_str = "-F 16" # FAT 16
+
+ extraopts = self.mkfs_extraopts or '-S 512'
+
+ dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
+ (label_str, self.fsuuid, extraopts, size_str, rootfs,
+ blocks)
+
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % rootfs
+ exec_cmd(chmod_cmd)
+
+ prepare_empty_partition_vfat = prepare_empty_partition_msdos
+
+ def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot):
+ """
+ Prepare a swap partition.
+ """
+ path = "%s/fs.%s" % (cr_workdir, self.fstype)
+
+ with open(path, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), self.size * 1024)
+
+ label_str = ""
+ if self.label:
+ label_str = "-L %s" % self.label
+
+ mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path)
+ exec_native_cmd(mkswap_cmd, native_sysroot)
diff --git a/poky/scripts/lib/wic/pluginbase.py b/poky/scripts/lib/wic/pluginbase.py
new file mode 100644
index 000000000..686d2fee3
--- /dev/null
+++ b/poky/scripts/lib/wic/pluginbase.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+__all__ = ['ImagerPlugin', 'SourcePlugin']
+
+import os
+import logging
+
+from collections import defaultdict
+from importlib.machinery import SourceFileLoader
+
+from wic import WicError
+from wic.misc import get_bitbake_var
+
+PLUGIN_TYPES = ["imager", "source"]
+
+SCRIPTS_PLUGIN_DIR = "scripts/lib/wic/plugins"
+
+logger = logging.getLogger('wic')
+
+PLUGINS = defaultdict(dict)
+
+class PluginMgr:
+ _plugin_dirs = []
+
+ @classmethod
+ def get_plugins(cls, ptype):
+ """Get dictionary of <plugin_name>:<class> pairs."""
+ if ptype not in PLUGIN_TYPES:
+            raise WicError('%s is not a valid plugin type' % ptype)
+
+ # collect plugin directories
+ if not cls._plugin_dirs:
+ cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
+ layers = get_bitbake_var("BBLAYERS") or ''
+ for layer_path in layers.split():
+ path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR)
+ path = os.path.abspath(os.path.expanduser(path))
+ if path not in cls._plugin_dirs and os.path.isdir(path):
+ cls._plugin_dirs.insert(0, path)
+
+ if ptype not in PLUGINS:
+ # load all ptype plugins
+ for pdir in cls._plugin_dirs:
+ ppath = os.path.join(pdir, ptype)
+ if os.path.isdir(ppath):
+ for fname in os.listdir(ppath):
+ if fname.endswith('.py'):
+ mname = fname[:-3]
+ mpath = os.path.join(ppath, fname)
+ logger.debug("loading plugin module %s", mpath)
+ SourceFileLoader(mname, mpath).load_module()
+
+ return PLUGINS.get(ptype)
+
+class PluginMeta(type):
+ def __new__(cls, name, bases, attrs):
+ class_type = type.__new__(cls, name, bases, attrs)
+ if 'name' in attrs:
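+            # e.g. a subclass defining name = 'rootfs' with
+            # wic_plugin_type 'source' becomes available as
+            # PLUGINS['source']['rootfs']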
+ PLUGINS[class_type.wic_plugin_type][attrs['name']] = class_type
+
+ return class_type
+
+class ImagerPlugin(metaclass=PluginMeta):
+ wic_plugin_type = "imager"
+
+ def do_create(self):
+ raise WicError("Method %s.do_create is not implemented" %
+ self.__class__.__name__)
+
+class SourcePlugin(metaclass=PluginMeta):
+    """
+    The methods that can be implemented by --source plugins.
+
+    Any methods not implemented in a subclass inherit these.
+    """
+    wic_plugin_type = "source"
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. This provides a hook to allow finalization of a
+ disk image e.g. to write an MBR to it.
+ """
+ logger.debug("SourcePlugin: do_install_disk: disk: %s", disk_name)
+
+ @classmethod
+ def do_stage_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Special content staging hook called before do_prepare_partition(),
+ normally empty.
+
+        Typically, a partition will just use the passed-in parameters,
+        e.g. the unmodified bootimg_dir, but in some cases things need
+        to be more tailored, e.g. to use a deploy dir + /boot. This
+        hook allows those files to be staged in a customized fashion.
+        Note that get_bitbake_var() allows you to access non-standard
+        variables that you might want to use for this.
+ """
+ logger.debug("SourcePlugin: do_stage_partition: part: %s", part)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), typically used to create
+ custom configuration files for a partition, for example
+ syslinux or grub config files.
+ """
+ logger.debug("SourcePlugin: do_configure_partition: part: %s", part)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
+ native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ logger.debug("SourcePlugin: do_prepare_partition: part: %s", part)
+
+ @classmethod
+ def do_post_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,
+ native_sysroot):
+ """
+        Called after the partition is created. It is useful for adding
+        post operations, e.g. signing the partition.
+ """
+ logger.debug("SourcePlugin: do_post_partition: part: %s", part)
diff --git a/poky/scripts/lib/wic/plugins/imager/direct.py b/poky/scripts/lib/wic/plugins/imager/direct.py
new file mode 100644
index 000000000..1fa6b917e
--- /dev/null
+++ b/poky/scripts/lib/wic/plugins/imager/direct.py
@@ -0,0 +1,607 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'direct' imager plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import logging
+import os
+import random
+import shutil
+import tempfile
+import uuid
+
+from time import strftime
+
+from oe.path import copyhardlinktree
+
+from wic import WicError
+from wic.filemap import sparse_copy
+from wic.ksparser import KickStart, KickStartError
+from wic.pluginbase import PluginMgr, ImagerPlugin
+from wic.misc import get_bitbake_var, exec_cmd, exec_native_cmd
+
+logger = logging.getLogger('wic')
+
+class DirectPlugin(ImagerPlugin):
+ """
+ Install a system into a file containing a partitioned disk image.
+
+ An image file is formatted with a partition table, each partition
+ created from a rootfs or other OpenEmbedded build artifact and dd'ed
+ into the virtual disk. The disk image can subsequently be dd'ed onto
+ media and used on actual hardware.
+ """
+ name = 'direct'
+
+ def __init__(self, wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+ native_sysroot, oe_builddir, options):
+ try:
+ self.ks = KickStart(wks_file)
+ except KickStartError as err:
+ raise WicError(str(err))
+
+ # parse possible 'rootfs=name' items
+ self.rootfs_dir = dict(rdir.split('=') for rdir in rootfs_dir.split(' '))
+ self.replaced_rootfs_paths = {}
+ self.bootimg_dir = bootimg_dir
+ self.kernel_dir = kernel_dir
+ self.native_sysroot = native_sysroot
+ self.oe_builddir = oe_builddir
+
+ self.outdir = options.outdir
+ self.compressor = options.compressor
+ self.bmap = options.bmap
+ self.no_fstab_update = options.no_fstab_update
+
+ self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
+ strftime("%Y%m%d%H%M"))
+ self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+ self._image = None
+ self.ptable_format = self.ks.bootloader.ptable
+ self.parts = self.ks.partitions
+
+ # as a convenience, set source to the boot partition source
+ # instead of forcing it to be set via bootloader --source
+ for part in self.parts:
+ if not self.ks.bootloader.source and part.mountpoint == "/boot":
+ self.ks.bootloader.source = part.source
+ break
+
+ image_path = self._full_path(self.workdir, self.parts[0].disk, "direct")
+ self._image = PartitionedImage(image_path, self.ptable_format,
+ self.parts, self.native_sysroot)
+
+ def do_create(self):
+ """
+ Plugin entry point.
+ """
+ try:
+ self.create()
+ self.assemble()
+ self.finalize()
+ self.print_info()
+ finally:
+ self.cleanup()
+
+ def _write_fstab(self, image_rootfs):
+        """Overridden to generate fstab (temporarily) in rootfs. This is
+        called from _create; make sure it doesn't get called from
+        BaseImage.create()
+ """
+ if not image_rootfs:
+ return
+
+ fstab_path = image_rootfs + "/etc/fstab"
+ if not os.path.isfile(fstab_path):
+ return
+
+ with open(fstab_path) as fstab:
+ fstab_lines = fstab.readlines()
+
+ if self._update_fstab(fstab_lines, self.parts):
+ # copy rootfs dir to workdir to update fstab
+ # as rootfs can be used by other tasks and can't be modified
+ new_rootfs = os.path.realpath(os.path.join(self.workdir, "rootfs_copy"))
+ copyhardlinktree(image_rootfs, new_rootfs)
+ fstab_path = os.path.join(new_rootfs, 'etc/fstab')
+
+ os.unlink(fstab_path)
+
+ with open(fstab_path, "w") as fstab:
+ fstab.writelines(fstab_lines)
+
+ return new_rootfs
+
+ def _update_fstab(self, fstab_lines, parts):
+ """Assume partition order same as in wks"""
+ updated = False
+ for part in parts:
+ if not part.realnum or not part.mountpoint \
+ or part.mountpoint == "/":
+ continue
+
+ if part.use_uuid:
+ if part.fsuuid:
+ # FAT UUID is different from others
+ if len(part.fsuuid) == 10:
+ device_name = "UUID=%s-%s" % \
+ (part.fsuuid[2:6], part.fsuuid[6:])
+ else:
+ device_name = "UUID=%s" % part.fsuuid
+ else:
+ device_name = "PARTUUID=%s" % part.uuid
+ else:
+ # mmc device partitions are named mmcblk0p1, mmcblk0p2..
+ prefix = 'p' if part.disk.startswith('mmcblk') else ''
+ device_name = "/dev/%s%s%d" % (part.disk, prefix, part.realnum)
+
+ opts = part.fsopts if part.fsopts else "defaults"
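+            # each appended line looks like, e.g.:
+            # "PARTUUID=<uuid>\t/data\text4\tdefaults\t0\t0\n"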
+ line = "\t".join([device_name, part.mountpoint, part.fstype,
+ opts, "0", "0"]) + "\n"
+
+ fstab_lines.append(line)
+ updated = True
+
+ return updated
+
+    def _full_path(self, path, name, extension):
+        """ Construct full file path to a file we generate. """
+        return os.path.join(path, "%s-%s.%s" % (self.name, name, extension))
+
+ #
+    # Actual implementation
+ #
+ def create(self):
+ """
+ For 'wic', we already have our build artifacts - we just create
+ filesystems from the artifacts directly and combine them into
+ a partitioned image.
+ """
+ if self.no_fstab_update:
+ new_rootfs = None
+ else:
+ new_rootfs = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
+ if new_rootfs:
+ # rootfs was copied to update fstab
+ self.replaced_rootfs_paths[new_rootfs] = self.rootfs_dir['ROOTFS_DIR']
+ self.rootfs_dir['ROOTFS_DIR'] = new_rootfs
+
+ for part in self.parts:
+ # get rootfs size from bitbake variable if it's not set in .ks file
+ if not part.size:
+ # and if rootfs name is specified for the partition
+ image_name = self.rootfs_dir.get(part.rootfs_dir)
+ if image_name and os.path.sep not in image_name:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ rsize_bb = get_bitbake_var('ROOTFS_SIZE', image_name)
+ if rsize_bb:
+ part.size = int(round(float(rsize_bb)))
+
+ self._image.prepare(self)
+ self._image.layout_partitions()
+ self._image.create()
+
+ def assemble(self):
+ """
+ Assemble partitions into disk image
+ """
+ self._image.assemble()
+
+ def finalize(self):
+ """
+ Finalize the disk image.
+
+ For example, prepare the image to be bootable by e.g.
+ creating and installing a bootloader configuration.
+ """
+ source_plugin = self.ks.bootloader.source
+ disk_name = self.parts[0].disk
+ if source_plugin:
+ plugin = PluginMgr.get_plugins('source')[source_plugin]
+ plugin.do_install_disk(self._image, disk_name, self, self.workdir,
+ self.oe_builddir, self.bootimg_dir,
+ self.kernel_dir, self.native_sysroot)
+
+ full_path = self._image.path
+ # Generate .bmap
+ if self.bmap:
+ logger.debug("Generating bmap file for %s", disk_name)
+ python = os.path.join(self.native_sysroot, 'usr/bin/python3-native/python3')
+ bmaptool = os.path.join(self.native_sysroot, 'usr/bin/bmaptool')
+ exec_native_cmd("%s %s create %s -o %s.bmap" % \
+ (python, bmaptool, full_path, full_path), self.native_sysroot)
+ # Compress the image
+ if self.compressor:
+ logger.debug("Compressing disk %s with %s", disk_name, self.compressor)
+ exec_cmd("%s %s" % (self.compressor, full_path))
+
+ def print_info(self):
+ """
+ Print the image(s) and artifacts used, for the user.
+ """
+ msg = "The new image(s) can be found here:\n"
+
+ extension = "direct" + {"gzip": ".gz",
+ "bzip2": ".bz2",
+ "xz": ".xz",
+ None: ""}.get(self.compressor)
+ full_path = self._full_path(self.outdir, self.parts[0].disk, extension)
+ msg += ' %s\n\n' % full_path
+
+ msg += 'The following build artifacts were used to create the image(s):\n'
+ for part in self.parts:
+ if part.rootfs_dir is None:
+ continue
+ if part.mountpoint == '/':
+ suffix = ':'
+ else:
+ suffix = '["%s"]:' % (part.mountpoint or part.label)
+ rootdir = part.rootfs_dir
+ if rootdir in self.replaced_rootfs_paths:
+ rootdir = self.replaced_rootfs_paths[rootdir]
+ msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), rootdir)
+
+ msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
+ msg += ' KERNEL_DIR: %s\n' % self.kernel_dir
+ msg += ' NATIVE_SYSROOT: %s\n' % self.native_sysroot
+
+ logger.info(msg)
+
+ @property
+ def rootdev(self):
+ """
+ Get root device name to use as a 'root' parameter
+ in kernel command line.
+
+        Assume the partition order is the same as in the .wks file
+ """
+ for part in self.parts:
+ if part.mountpoint == "/":
+ if part.uuid:
+ return "PARTUUID=%s" % part.uuid
+ else:
+ suffix = 'p' if part.disk.startswith('mmcblk') else ''
+                    return "/dev/%s%s%d" % (part.disk, suffix, part.realnum)
+
+ def cleanup(self):
+ if self._image:
+ self._image.cleanup()
+
+ # Move results to the output dir
+ if not os.path.exists(self.outdir):
+ os.makedirs(self.outdir)
+
+ for fname in os.listdir(self.workdir):
+ path = os.path.join(self.workdir, fname)
+ if os.path.isfile(path):
+ shutil.move(path, os.path.join(self.outdir, fname))
+
+ # remove work directory
+ shutil.rmtree(self.workdir, ignore_errors=True)
+
+# Overhead of the MBR partitioning scheme (just one sector)
+MBR_OVERHEAD = 1
+
+# Overhead of the GPT partitioning scheme
+GPT_OVERHEAD = 34
+
+# Size of a sector in bytes
+SECTOR_SIZE = 512
+
+class PartitionedImage():
+ """
+ Partitioned image in a file.
+ """
+
+ def __init__(self, path, ptable_format, partitions, native_sysroot=None):
+ self.path = path # Path to the image file
+ self.numpart = 0 # Number of allocated partitions
+ self.realpart = 0 # Number of partitions in the partition table
+ self.offset = 0 # Offset of next partition (in sectors)
+ self.min_size = 0 # Minimum required disk size to fit
+ # all partitions (in bytes)
+ self.ptable_format = ptable_format # Partition table format
+ # Disk system identifier
+ self.identifier = random.SystemRandom().randint(1, 0xffffffff)
+
+ self.partitions = partitions
+ self.partimages = []
+ # Size of a sector used in calculations
+ self.sector_size = SECTOR_SIZE
+ self.native_sysroot = native_sysroot
+
+ # calculate the real partition number, accounting for partitions not
+ # in the partition table and logical partitions
+ realnum = 0
+ for part in self.partitions:
+ if part.no_table:
+ part.realnum = 0
+ else:
+ realnum += 1
+ if self.ptable_format == 'msdos' and realnum > 3 and len(partitions) > 4:
+ part.realnum = realnum + 1
+ continue
+ part.realnum = realnum
+
+        # generate partition and filesystem UUIDs
+ for part in self.partitions:
+ if not part.uuid and part.use_uuid:
+ if self.ptable_format == 'gpt':
+ part.uuid = str(uuid.uuid4())
+ else: # msdos partition table
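+                    # e.g. identifier 0x1a2b3c4d, partition 2 -> '1a2b3c4d-02'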
+ part.uuid = '%08x-%02d' % (self.identifier, part.realnum)
+ if not part.fsuuid:
+ if part.fstype == 'vfat' or part.fstype == 'msdos':
+ part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper()
+ else:
+ part.fsuuid = str(uuid.uuid4())
+
+ def prepare(self, imager):
+ """Prepare an image. Call prepare method of all image partitions."""
+ for part in self.partitions:
+ # need to create the filesystems in order to get their
+ # sizes before we can add them and do the layout.
+ part.prepare(imager, imager.workdir, imager.oe_builddir,
+ imager.rootfs_dir, imager.bootimg_dir,
+ imager.kernel_dir, imager.native_sysroot)
+
+ # Converting kB to sectors for parted
+ part.size_sec = part.disk_size * 1024 // self.sector_size
+
+ def layout_partitions(self):
+        """ Lay out the partitions, i.e. calculate the position of every
+        partition on the disk. The 'ptable_format' attribute defines the
+        partition table format and may be "msdos" or "gpt". """
+
+ logger.debug("Assigning %s partitions to disks", self.ptable_format)
+
+ # The number of primary and logical partitions. Extended partition and
+ # partitions not listed in the table are not included.
+ num_real_partitions = len([p for p in self.partitions if not p.no_table])
+
+ # Go through partitions in the order they are added in .ks file
+ for num in range(len(self.partitions)):
+ part = self.partitions[num]
+
+ if self.ptable_format == 'msdos' and part.part_name:
+ raise WicError("setting custom partition name is not " \
+ "implemented for msdos partitions")
+
+ if self.ptable_format == 'msdos' and part.part_type:
+ # The --part-type can also be implemented for MBR partitions,
+ # in which case it would map to the 1-byte "partition type"
+                # field at offset 3 of the partition entry.
+ raise WicError("setting custom partition type is not " \
+ "implemented for msdos partitions")
+
+ # Get the disk where the partition is located
+ self.numpart += 1
+ if not part.no_table:
+ self.realpart += 1
+
+ if self.numpart == 1:
+ if self.ptable_format == "msdos":
+ overhead = MBR_OVERHEAD
+ elif self.ptable_format == "gpt":
+ overhead = GPT_OVERHEAD
+
+                # Reserve the sectors required for the partitioning scheme overhead
+ self.offset += overhead
+
+ if self.realpart > 3 and num_real_partitions > 4:
+ # Reserve a sector for EBR for every logical partition
+ # before alignment is performed.
+ if self.ptable_format == "msdos":
+ self.offset += 1
+
+ if part.align:
+                # If this is not the first partition and alignment is set,
+                # we need to align the partition.
+                # FIXME: This leaves empty gaps on the disk. To fill them we
+                # could enlarge the previous partition?
+
+ # Calc how much the alignment is off.
+ align_sectors = self.offset % (part.align * 1024 // self.sector_size)
+
+ if align_sectors:
+ # If partition is not aligned as required, we need
+ # to move forward to the next alignment point
+ align_sectors = (part.align * 1024 // self.sector_size) - align_sectors
+
+ logger.debug("Realignment for %s%s with %s sectors, original"
+ " offset %s, target alignment is %sK.",
+ part.disk, self.numpart, align_sectors,
+ self.offset, part.align)
+
+                    # increase the offset so the partition actually starts at the right alignment
+ self.offset += align_sectors
+
+ part.start = self.offset
+ self.offset += part.size_sec
+
+ part.type = 'primary'
+ if not part.no_table:
+ part.num = self.realpart
+ else:
+ part.num = 0
+
+ if self.ptable_format == "msdos":
+ # only count the partitions that are in partition table
+ if num_real_partitions > 4:
+ if self.realpart > 3:
+ part.type = 'logical'
+ part.num = self.realpart + 1
+
+ logger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
+ "sectors (%d bytes).", part.mountpoint, part.disk,
+ part.num, part.start, self.offset - 1, part.size_sec,
+ part.size_sec * self.sector_size)
+
+        # Once all the partitions have been laid out, we can calculate the
+        # minimum disk size
+ self.min_size = self.offset
+ if self.ptable_format == "gpt":
+ self.min_size += GPT_OVERHEAD
+
+ self.min_size *= self.sector_size
+
+ def _create_partition(self, device, parttype, fstype, start, size):
+ """ Create a partition on an image described by the 'device' object. """
+
+        # Start is included in the size, so we need to subtract one from the end.
+ end = start + size - 1
+ logger.debug("Added '%s' partition, sectors %d-%d, size %d sectors",
+ parttype, start, end, size)
+
+ cmd = "parted -s %s unit s mkpart %s" % (device, parttype)
+ if fstype:
+ cmd += " %s" % fstype
+ cmd += " %d %d" % (start, end)
+
+ return exec_native_cmd(cmd, self.native_sysroot)
+
+ def create(self):
+ logger.debug("Creating sparse file %s", self.path)
+ with open(self.path, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), self.min_size)
+
+ logger.debug("Initializing partition table for %s", self.path)
+ exec_native_cmd("parted -s %s mklabel %s" %
+ (self.path, self.ptable_format), self.native_sysroot)
+
+ logger.debug("Set disk identifier %x", self.identifier)
+ with open(self.path, 'r+b') as img:
+ img.seek(0x1B8)
+ img.write(self.identifier.to_bytes(4, 'little'))
+
+ logger.debug("Creating partitions")
+
+ for part in self.partitions:
+ if part.num == 0:
+ continue
+
+ if self.ptable_format == "msdos" and part.num == 5:
+ # Create an extended partition (note: extended
+ # partition is described in MBR and contains all
+ # logical partitions). The logical partitions save a
+ # sector for an EBR just before the start of a
+ # partition. The extended partition must start one
+ # sector before the start of the first logical
+ # partition. This way the first EBR is inside of the
+                # extended partition. Since the extended partition
+                # starts a sector before the first logical partition,
+ # add a sector at the back, so that there is enough
+ # room for all logical partitions.
+ self._create_partition(self.path, "extended",
+ None, part.start - 1,
+ self.offset - part.start + 1)
+
+ if part.fstype == "swap":
+ parted_fs_type = "linux-swap"
+ elif part.fstype == "vfat":
+ parted_fs_type = "fat32"
+ elif part.fstype == "msdos":
+ parted_fs_type = "fat16"
+ if not part.system_id:
+ part.system_id = '0x6' # FAT16
+ else:
+ # Type for ext2/ext3/ext4/btrfs
+ parted_fs_type = "ext2"
+
+            # The boot ROM of OMAP boards requires the vfat boot partition to
+            # have an even number of sectors.
+ if part.mountpoint == "/boot" and part.fstype in ["vfat", "msdos"] \
+ and part.size_sec % 2:
+ logger.debug("Subtracting one sector from '%s' partition to "
+ "get even number of sectors for the partition",
+ part.mountpoint)
+ part.size_sec -= 1
+
+ self._create_partition(self.path, part.type,
+ parted_fs_type, part.start, part.size_sec)
+
+ if part.part_name:
+ logger.debug("partition %d: set name to %s",
+ part.num, part.part_name)
+ exec_native_cmd("sgdisk --change-name=%d:%s %s" % \
+ (part.num, part.part_name,
+ self.path), self.native_sysroot)
+
+ if part.part_type:
+ logger.debug("partition %d: set type UID to %s",
+ part.num, part.part_type)
+ exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
+ (part.num, part.part_type,
+ self.path), self.native_sysroot)
+
+ if part.uuid and self.ptable_format == "gpt":
+ logger.debug("partition %d: set UUID to %s",
+ part.num, part.uuid)
+ exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
+ (part.num, part.uuid, self.path),
+ self.native_sysroot)
+
+ if part.label and self.ptable_format == "gpt":
+ logger.debug("partition %d: set name to %s",
+ part.num, part.label)
+ exec_native_cmd("parted -s %s name %d %s" % \
+ (self.path, part.num, part.label),
+ self.native_sysroot)
+
+ if part.active:
+ flag_name = "legacy_boot" if self.ptable_format == 'gpt' else "boot"
+ logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
+ flag_name, part.num, self.path)
+ exec_native_cmd("parted -s %s set %d %s on" % \
+ (self.path, part.num, flag_name),
+ self.native_sysroot)
+ if part.system_id:
+ exec_native_cmd("sfdisk --part-type %s %s %s" % \
+ (self.path, part.num, part.system_id),
+ self.native_sysroot)
+
+ def cleanup(self):
+ # remove partition images
+ for image in set(self.partimages):
+ os.remove(image)
+
+ def assemble(self):
+ logger.debug("Installing partitions")
+
+ for part in self.partitions:
+ source = part.source_file
+ if source:
+ # install source_file contents into a partition
+ sparse_copy(source, self.path, seek=part.start * self.sector_size)
+
+ logger.debug("Installed %s in partition %d, sectors %d-%d, "
+ "size %d sectors", source, part.num, part.start,
+ part.start + part.size_sec - 1, part.size_sec)
+
+ partimage = self.path + '.p%d' % part.num
+ os.rename(source, partimage)
+ self.partimages.append(partimage)
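
As a hedged illustration of the arithmetic layout_partitions() performs above, the following self-contained sketch shows how a partition offset is advanced to the next alignment boundary; the values are made up, and only SECTOR_SIZE matches the plugin:

    SECTOR_SIZE = 512   # bytes, as in PartitionedImage

    def align_offset(offset, align_kib):
        """Advance 'offset' (in sectors) to the next align_kib boundary."""
        align_sectors = align_kib * 1024 // SECTOR_SIZE
        rem = offset % align_sectors
        if rem:
            # move forward to the next alignment point
            offset += align_sectors - rem
        return offset

    # one sector of MBR overhead, then a partition aligned to 4 MiB
    print(align_offset(1, 4096))   # -> 8192 sectors, i.e. 4 MiB
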
diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
new file mode 100644
index 000000000..beb74d7a7
--- /dev/null
+++ b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -0,0 +1,258 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2014, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-efi' source plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import logging
+import os
+import shutil
+
+from wic import WicError
+from wic.engine import get_custom_config
+from wic.pluginbase import SourcePlugin
+from wic.misc import (exec_cmd, exec_native_cmd,
+ get_bitbake_var, BOOTDD_EXTRA_SPACE)
+
+logger = logging.getLogger('wic')
+
+class BootimgEFIPlugin(SourcePlugin):
+ """
+ Create EFI boot partition.
+ This plugin supports GRUB 2 and systemd-boot bootloaders.
+ """
+
+ name = 'bootimg-efi'
+
+ @classmethod
+ def do_configure_grubefi(cls, creator, cr_workdir):
+ """
+ Create loader-specific (grub-efi) config
+ """
+ configfile = creator.ks.bootloader.configfile
+ custom_cfg = None
+ if configfile:
+ custom_cfg = get_custom_config(configfile)
+ if custom_cfg:
+ # Use a custom configuration for grub
+ grubefi_conf = custom_cfg
+ logger.debug("Using custom configuration file "
+ "%s for grub.cfg", configfile)
+ else:
+ raise WicError("configfile is specified but failed to "
+ "get it from %s." % configfile)
+
+ if not custom_cfg:
+ # Create grub configuration using parameters from wks file
+ bootloader = creator.ks.bootloader
+
+ grubefi_conf = ""
+ grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
+ grubefi_conf += "default=boot\n"
+ grubefi_conf += "timeout=%s\n" % bootloader.timeout
+ grubefi_conf += "menuentry 'boot'{\n"
+
+ kernel = "/bzImage"
+
+ grubefi_conf += "linux %s root=%s rootwait %s\n" \
+ % (kernel, creator.rootdev, bootloader.append)
+ grubefi_conf += "}\n"
+
+ logger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg",
+ cr_workdir)
+        with open("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "w") as cfg:
+            cfg.write(grubefi_conf)
+
+ @classmethod
+ def do_configure_systemdboot(cls, hdddir, creator, cr_workdir, source_params):
+ """
+ Create loader-specific systemd-boot/gummiboot config
+ """
+ install_cmd = "install -d %s/loader" % hdddir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -d %s/loader/entries" % hdddir
+ exec_cmd(install_cmd)
+
+ bootloader = creator.ks.bootloader
+
+ loader_conf = ""
+ loader_conf += "default boot\n"
+ loader_conf += "timeout %d\n" % bootloader.timeout
+
+ initrd = source_params.get('initrd')
+
+ if initrd:
+            # we need a common deploy directory variable for this
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
+ exec_cmd(cp_cmd, True)
+ else:
+ logger.debug("Ignoring missing initrd")
+
+ logger.debug("Writing systemd-boot config "
+ "%s/hdd/boot/loader/loader.conf", cr_workdir)
+        with open("%s/hdd/boot/loader/loader.conf" % cr_workdir, "w") as cfg:
+            cfg.write(loader_conf)
+
+ configfile = creator.ks.bootloader.configfile
+ custom_cfg = None
+ if configfile:
+ custom_cfg = get_custom_config(configfile)
+ if custom_cfg:
+ # Use a custom configuration for systemd-boot
+ boot_conf = custom_cfg
+ logger.debug("Using custom configuration file "
+                             "%s for systemd-boot's boot.conf", configfile)
+ else:
+ raise WicError("configfile is specified but failed to "
+                               "get it from %s." % configfile)
+
+ if not custom_cfg:
+ # Create systemd-boot configuration using parameters from wks file
+ kernel = "/bzImage"
+
+ boot_conf = ""
+ boot_conf += "title boot\n"
+ boot_conf += "linux %s\n" % kernel
+ boot_conf += "options LABEL=Boot root=%s %s\n" % \
+ (creator.rootdev, bootloader.append)
+
+ if initrd:
+ boot_conf += "initrd /%s\n" % initrd
+
+ logger.debug("Writing systemd-boot config "
+ "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
+        with open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w") as cfg:
+            cfg.write(boot_conf)
+
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), creates loader-specific config
+ """
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ install_cmd = "install -d %s/EFI/BOOT" % hdddir
+ exec_cmd(install_cmd)
+
+ try:
+ if source_params['loader'] == 'grub-efi':
+ cls.do_configure_grubefi(creator, cr_workdir)
+ elif source_params['loader'] == 'systemd-boot':
+ cls.do_configure_systemdboot(hdddir, creator, cr_workdir, source_params)
+ else:
+ raise WicError("unrecognized bootimg-efi loader: %s" % source_params['loader'])
+ except KeyError:
+ raise WicError("bootimg-efi requires a loader, none specified")
+
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for an EFI (grub) boot partition.
+ """
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ staging_kernel_dir = kernel_dir
+
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
+ (staging_kernel_dir, hdddir)
+ exec_cmd(install_cmd)
+
+
+ try:
+ if source_params['loader'] == 'grub-efi':
+ shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir,
+ "%s/grub.cfg" % cr_workdir)
+ for mod in [x for x in os.listdir(kernel_dir) if x.startswith("grub-efi-")]:
+ cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[9:])
+ exec_cmd(cp_cmd, True)
+ shutil.move("%s/grub.cfg" % cr_workdir,
+ "%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)
+ elif source_params['loader'] == 'systemd-boot':
+ for mod in [x for x in os.listdir(kernel_dir) if x.startswith("systemd-")]:
+ cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[8:])
+ exec_cmd(cp_cmd, True)
+ else:
+ raise WicError("unrecognized bootimg-efi loader: %s" %
+ source_params['loader'])
+ except KeyError:
+ raise WicError("bootimg-efi requires a loader, none specified")
+
+ startup = os.path.join(kernel_dir, "startup.nsh")
+ if os.path.exists(startup):
+ cp_cmd = "cp %s %s/" % (startup, hdddir)
+ exec_cmd(cp_cmd, True)
+
+ du_cmd = "du -bks %s" % hdddir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+
+ extra_blocks = part.get_extra_block_count(blocks)
+
+ if extra_blocks < BOOTDD_EXTRA_SPACE:
+ extra_blocks = BOOTDD_EXTRA_SPACE
+
+ blocks += extra_blocks
+
+ logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+ extra_blocks, part.mountpoint, blocks)
+
+ # dosfs image, created by mkdosfs
+ bootimg = "%s/boot.img" % cr_workdir
+
+ dosfs_cmd = "mkdosfs -n efi -i %s -C %s %d" % \
+ (part.fsuuid, bootimg, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % bootimg
+ exec_cmd(chmod_cmd)
+
+ du_cmd = "du -Lbks %s" % bootimg
+ out = exec_cmd(du_cmd)
+ bootimg_size = out.split()[0]
+
+ part.size = int(bootimg_size)
+ part.source_file = bootimg
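
For context, a hypothetical .wks fragment that exercises this plugin, in the kickstart style wic uses elsewhere (disk and label names are illustrative):

    part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024
    part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
    bootloader --ptable gpt --timeout=5 --append="rootwait"
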
diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-partition.py b/poky/scripts/lib/wic/plugins/source/bootimg-partition.py
new file mode 100644
index 000000000..b239fc0b4
--- /dev/null
+++ b/poky/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -0,0 +1,132 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-partition' source plugin class for
+# 'wic'. The plugin creates an image of boot partition, copying over
+# files listed in IMAGE_BOOT_FILES bitbake variable.
+#
+# AUTHORS
+# Maciej Borzecki <maciej.borzecki (at] open-rnd.pl>
+#
+
+import logging
+import os
+import re
+
+from glob import glob
+
+from wic import WicError
+from wic.pluginbase import SourcePlugin
+from wic.misc import exec_cmd, get_bitbake_var
+
+logger = logging.getLogger('wic')
+
+class BootimgPartitionPlugin(SourcePlugin):
+ """
+ Create an image of boot partition, copying over files
+ listed in IMAGE_BOOT_FILES bitbake variable.
+ """
+
+ name = 'bootimg-partition'
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, does the following:
+ - sets up a vfat partition
+ - copies all files listed in IMAGE_BOOT_FILES variable
+ """
+ hdddir = "%s/boot.%d" % (cr_workdir, part.lineno)
+ install_cmd = "install -d %s" % hdddir
+ exec_cmd(install_cmd)
+
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+        logger.debug('Kernel dir: %s', kernel_dir)
+
+ boot_files = None
+ for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)):
+ if fmt:
+ var = fmt % id
+ else:
+ var = ""
+
+ boot_files = get_bitbake_var("IMAGE_BOOT_FILES" + var)
+ if boot_files is not None:
+ break
+
+ if boot_files is None:
+ raise WicError('No boot files defined, IMAGE_BOOT_FILES unset for entry #%d' % part.lineno)
+
+ logger.debug('Boot files: %s', boot_files)
+
+ # list of tuples (src_name, dst_name)
+ deploy_files = []
+ for src_entry in re.findall(r'[\w;\-\./\*]+', boot_files):
+ if ';' in src_entry:
+ dst_entry = tuple(src_entry.split(';'))
+ if not dst_entry[0] or not dst_entry[1]:
+ raise WicError('Malformed boot file entry: %s' % src_entry)
+ else:
+ dst_entry = (src_entry, src_entry)
+
+ logger.debug('Destination entry: %r', dst_entry)
+ deploy_files.append(dst_entry)
+
+ for deploy_entry in deploy_files:
+ src, dst = deploy_entry
+ install_task = []
+ if '*' in src:
+ # by default install files under their basename
+ entry_name_fn = os.path.basename
+ if dst != src:
+ # unless a target name was given, then treat name
+ # as a directory and append a basename
+ entry_name_fn = lambda name: \
+ os.path.join(dst,
+ os.path.basename(name))
+
+ srcs = glob(os.path.join(kernel_dir, src))
+
+ logger.debug('Globbed sources: %s', ', '.join(srcs))
+ for entry in srcs:
+ entry_dst_name = entry_name_fn(entry)
+ install_task.append((entry,
+ os.path.join(hdddir,
+ entry_dst_name)))
+ else:
+ install_task = [(os.path.join(kernel_dir, src),
+ os.path.join(hdddir, dst))]
+
+ for task in install_task:
+ src_path, dst_path = task
+ logger.debug('Install %s as %s',
+ os.path.basename(src_path), dst_path)
+ install_cmd = "install -m 0644 -D %s %s" \
+ % (src_path, dst_path)
+ exec_cmd(install_cmd)
+
+ logger.debug('Prepare boot partition using rootfs in %s', hdddir)
+ part.prepare_rootfs(cr_workdir, oe_builddir, hdddir,
+ native_sysroot, False)
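
A standalone sketch of how an IMAGE_BOOT_FILES value is turned into (src, dst) pairs by the loop above; the variable value here is made up for illustration:

    import re

    boot_files = "u-boot.img uEnv.txt;boot/uEnv.txt overlays/*;overlays/"

    deploy_files = []
    for src_entry in re.findall(r'[\w;\-\./\*]+', boot_files):
        if ';' in src_entry:
            # 'src;dst' entries rename (or re-root) the file on the partition
            dst_entry = tuple(src_entry.split(';'))
        else:
            # plain entries are installed under their own name
            dst_entry = (src_entry, src_entry)
        deploy_files.append(dst_entry)

    print(deploy_files)
    # [('u-boot.img', 'u-boot.img'), ('uEnv.txt', 'boot/uEnv.txt'),
    #  ('overlays/*', 'overlays/')]
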
diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
new file mode 100644
index 000000000..d599112dd
--- /dev/null
+++ b/poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -0,0 +1,207 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2014, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-pcbios' source plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import logging
+import os
+
+from wic import WicError
+from wic.engine import get_custom_config
+from wic.pluginbase import SourcePlugin
+from wic.misc import (exec_cmd, exec_native_cmd,
+ get_bitbake_var, BOOTDD_EXTRA_SPACE)
+
+logger = logging.getLogger('wic')
+
+class BootimgPcbiosPlugin(SourcePlugin):
+ """
+ Create MBR boot partition and install syslinux on it.
+ """
+
+ name = 'bootimg-pcbios'
+
+ @classmethod
+ def _get_bootimg_dir(cls, bootimg_dir, dirname):
+ """
+ Check if dirname exists in default bootimg_dir or in STAGING_DIR.
+ """
+ for result in (bootimg_dir, get_bitbake_var("STAGING_DATADIR")):
+ if os.path.exists("%s/%s" % (result, dirname)):
+ return result
+
+ raise WicError("Couldn't find correct bootimg_dir, exiting")
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. In this case, we install the MBR.
+ """
+ bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
+ mbrfile = "%s/syslinux/" % bootimg_dir
+ if creator.ptable_format == 'msdos':
+ mbrfile += "mbr.bin"
+ elif creator.ptable_format == 'gpt':
+ mbrfile += "gptmbr.bin"
+ else:
+ raise WicError("Unsupported partition table: %s" %
+ creator.ptable_format)
+
+ if not os.path.exists(mbrfile):
+ raise WicError("Couldn't find %s. If using the -e option, do you "
+ "have the right MACHINE set in local.conf? If not, "
+ "is the bootimg_dir path correct?" % mbrfile)
+
+ full_path = creator._full_path(workdir, disk_name, "direct")
+ logger.debug("Installing MBR on disk %s as %s with size %s bytes",
+ disk_name, full_path, disk.min_size)
+
+ dd_cmd = "dd if=%s of=%s conv=notrunc" % (mbrfile, full_path)
+ exec_cmd(dd_cmd, native_sysroot)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), creates syslinux config
+ """
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ install_cmd = "install -d %s" % hdddir
+ exec_cmd(install_cmd)
+
+ bootloader = creator.ks.bootloader
+
+ custom_cfg = None
+ if bootloader.configfile:
+ custom_cfg = get_custom_config(bootloader.configfile)
+ if custom_cfg:
+ # Use a custom configuration for grub
+ syslinux_conf = custom_cfg
+ logger.debug("Using custom configuration file %s "
+ "for syslinux.cfg", bootloader.configfile)
+ else:
+ raise WicError("configfile is specified but failed to "
+ "get it from %s." % bootloader.configfile)
+
+ if not custom_cfg:
+ # Create syslinux configuration using parameters from wks file
+            splash = os.path.join(cr_workdir, "hdd/boot/splash.jpg")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
+
+ syslinux_conf = ""
+ syslinux_conf += "PROMPT 0\n"
+ syslinux_conf += "TIMEOUT " + str(bootloader.timeout) + "\n"
+ syslinux_conf += "\n"
+ syslinux_conf += "ALLOWOPTIONS 1\n"
+ syslinux_conf += "SERIAL 0 115200\n"
+ syslinux_conf += "\n"
+ if splashline:
+ syslinux_conf += "%s\n" % splashline
+ syslinux_conf += "DEFAULT boot\n"
+ syslinux_conf += "LABEL boot\n"
+
+ kernel = "/vmlinuz"
+ syslinux_conf += "KERNEL " + kernel + "\n"
+
+ syslinux_conf += "APPEND label=boot root=%s %s\n" % \
+ (creator.rootdev, bootloader.append)
+
+ logger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg",
+ cr_workdir)
+        with open("%s/hdd/boot/syslinux.cfg" % cr_workdir, "w") as cfg:
+            cfg.write(syslinux_conf)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for legacy bios boot partition.
+ """
+ bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
+
+ staging_kernel_dir = kernel_dir
+
+ hdddir = "%s/hdd/boot" % cr_workdir
+
+ cmds = ("install -m 0644 %s/bzImage %s/vmlinuz" %
+ (staging_kernel_dir, hdddir),
+ "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
+ (bootimg_dir, hdddir),
+ "install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
+ (bootimg_dir, hdddir),
+ "install -m 444 %s/syslinux/libcom32.c32 %s/libcom32.c32" %
+ (bootimg_dir, hdddir),
+ "install -m 444 %s/syslinux/libutil.c32 %s/libutil.c32" %
+ (bootimg_dir, hdddir))
+
+ for install_cmd in cmds:
+ exec_cmd(install_cmd)
+
+ du_cmd = "du -bks %s" % hdddir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+
+ extra_blocks = part.get_extra_block_count(blocks)
+
+ if extra_blocks < BOOTDD_EXTRA_SPACE:
+ extra_blocks = BOOTDD_EXTRA_SPACE
+
+ blocks += extra_blocks
+
+ logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+ extra_blocks, part.mountpoint, blocks)
+
+ # dosfs image, created by mkdosfs
+ bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno)
+
+ dosfs_cmd = "mkdosfs -n boot -i %s -S 512 -C %s %d" % \
+ (part.fsuuid, bootimg, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ syslinux_cmd = "syslinux %s" % bootimg
+ exec_native_cmd(syslinux_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % bootimg
+ exec_cmd(chmod_cmd)
+
+ du_cmd = "du -Lbks %s" % bootimg
+ out = exec_cmd(du_cmd)
+ bootimg_size = out.split()[0]
+
+ part.size = int(bootimg_size)
+ part.source_file = bootimg
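
A hypothetical .wks fragment using this plugin (values illustrative, mirroring the layout the plugin expects):

    part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
    part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
    bootloader --timeout=0 --append="rootwait rootfstype=ext4"
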
diff --git a/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
new file mode 100644
index 000000000..d6bd3bff7
--- /dev/null
+++ b/poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -0,0 +1,466 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'isoimage-isohybrid' source plugin class for 'wic'
+#
+# AUTHORS
+# Mihaly Varga <mihaly.varga (at] ni.com>
+
+import glob
+import logging
+import os
+import re
+import shutil
+
+from wic import WicError
+from wic.engine import get_custom_config
+from wic.pluginbase import SourcePlugin
+from wic.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+
+logger = logging.getLogger('wic')
+
+class IsoImagePlugin(SourcePlugin):
+ """
+ Create a bootable ISO image
+
+ This plugin creates a hybrid, legacy and EFI bootable ISO image. The
+ generated image can be used on optical media as well as USB media.
+
+ Legacy boot uses syslinux and EFI boot uses grub or gummiboot (not
+ implemented yet) as bootloader. The plugin creates the directories required
+ by bootloaders and populates them by creating and configuring the
+ bootloader files.
+
+ Example kickstart file:
+ part /boot --source isoimage-isohybrid --sourceparams="loader=grub-efi, \\
+ image_name= IsoImage" --ondisk cd --label LIVECD --fstype=ext2
+ bootloader --timeout=10 --append=" "
+
+ In --sourceparams "loader" specifies the bootloader used for booting in EFI
+ mode, while "image_name" specifies the name of the generated image. In the
+ example above, wic creates an ISO image named IsoImage-cd.direct (default
+    extension added by the direct imager plugin) and a file named IsoImage-cd.iso
+ """
+
+ name = 'isoimage-isohybrid'
+
+ @classmethod
+ def do_configure_syslinux(cls, creator, cr_workdir):
+ """
+ Create loader-specific (syslinux) config
+ """
+ splash = os.path.join(cr_workdir, "ISO/boot/splash.jpg")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
+
+ bootloader = creator.ks.bootloader
+
+ syslinux_conf = ""
+ syslinux_conf += "PROMPT 0\n"
+ syslinux_conf += "TIMEOUT %s \n" % (bootloader.timeout or 10)
+ syslinux_conf += "\n"
+ syslinux_conf += "ALLOWOPTIONS 1\n"
+ syslinux_conf += "SERIAL 0 115200\n"
+ syslinux_conf += "\n"
+ if splashline:
+ syslinux_conf += "%s\n" % splashline
+ syslinux_conf += "DEFAULT boot\n"
+ syslinux_conf += "LABEL boot\n"
+
+ kernel = "/bzImage"
+ syslinux_conf += "KERNEL " + kernel + "\n"
+ syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
+ % bootloader.append
+
+ logger.debug("Writing syslinux config %s/ISO/isolinux/isolinux.cfg",
+ cr_workdir)
+
+ with open("%s/ISO/isolinux/isolinux.cfg" % cr_workdir, "w") as cfg:
+ cfg.write(syslinux_conf)
+
+ @classmethod
+ def do_configure_grubefi(cls, part, creator, target_dir):
+ """
+ Create loader-specific (grub-efi) config
+ """
+ configfile = creator.ks.bootloader.configfile
+ if configfile:
+ grubefi_conf = get_custom_config(configfile)
+ if grubefi_conf:
+ logger.debug("Using custom configuration file %s for grub.cfg",
+ configfile)
+ else:
+ raise WicError("configfile is specified "
+                               "but failed to get it from %s" % configfile)
+ else:
+ splash = os.path.join(target_dir, "splash.jpg")
+ if os.path.exists(splash):
+ splashline = "menu background splash.jpg"
+ else:
+ splashline = ""
+
+ bootloader = creator.ks.bootloader
+
+ grubefi_conf = ""
+ grubefi_conf += "serial --unit=0 --speed=115200 --word=8 "
+ grubefi_conf += "--parity=no --stop=1\n"
+ grubefi_conf += "default=boot\n"
+ grubefi_conf += "timeout=%s\n" % (bootloader.timeout or 10)
+ grubefi_conf += "\n"
+ grubefi_conf += "search --set=root --label %s " % part.label
+ grubefi_conf += "\n"
+ grubefi_conf += "menuentry 'boot'{\n"
+
+ kernel = "/bzImage"
+
+ grubefi_conf += "linux %s rootwait %s\n" \
+ % (kernel, bootloader.append)
+ grubefi_conf += "initrd /initrd \n"
+ grubefi_conf += "}\n"
+
+ if splashline:
+ grubefi_conf += "%s\n" % splashline
+
+ cfg_path = os.path.join(target_dir, "grub.cfg")
+ logger.debug("Writing grubefi config %s", cfg_path)
+
+ with open(cfg_path, "w") as cfg:
+ cfg.write(grubefi_conf)
+
+ @staticmethod
+ def _build_initramfs_path(rootfs_dir, cr_workdir):
+ """
+ Create path for initramfs image
+ """
+
+ initrd = get_bitbake_var("INITRD_LIVE") or get_bitbake_var("INITRD")
+ if not initrd:
+ initrd_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not initrd_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting.")
+
+ image_name = get_bitbake_var("IMAGE_BASENAME")
+ if not image_name:
+ raise WicError("Couldn't find IMAGE_BASENAME, exiting.")
+
+ image_type = get_bitbake_var("INITRAMFS_FSTYPES")
+ if not image_type:
+ raise WicError("Couldn't find INITRAMFS_FSTYPES, exiting.")
+
+ machine = os.path.basename(initrd_dir)
+
+ pattern = '%s/%s*%s.%s' % (initrd_dir, image_name, machine, image_type)
+ files = glob.glob(pattern)
+ if files:
+ initrd = files[0]
+
+ if not initrd or not os.path.exists(initrd):
+ # Create initrd from rootfs directory
+ initrd = "%s/initrd.cpio.gz" % cr_workdir
+ initrd_dir = "%s/INITRD" % cr_workdir
+ shutil.copytree("%s" % rootfs_dir, \
+ "%s" % initrd_dir, symlinks=True)
+
+ if os.path.isfile("%s/init" % rootfs_dir):
+ shutil.copy2("%s/init" % rootfs_dir, "%s/init" % initrd_dir)
+ elif os.path.lexists("%s/init" % rootfs_dir):
+ os.symlink(os.readlink("%s/init" % rootfs_dir), \
+ "%s/init" % initrd_dir)
+ elif os.path.isfile("%s/sbin/init" % rootfs_dir):
+ shutil.copy2("%s/sbin/init" % rootfs_dir, \
+ "%s" % initrd_dir)
+ elif os.path.lexists("%s/sbin/init" % rootfs_dir):
+ os.symlink(os.readlink("%s/sbin/init" % rootfs_dir), \
+ "%s/init" % initrd_dir)
+ else:
+ raise WicError("Couldn't find or build initrd, exiting.")
+
+ exec_cmd("cd %s && find . | cpio -o -H newc -R +0:+0 >./initrd.cpio " \
+ % initrd_dir, as_shell=True)
+ exec_cmd("gzip -f -9 -c %s/initrd.cpio > %s" \
+ % (cr_workdir, initrd), as_shell=True)
+ shutil.rmtree(initrd_dir)
+
+ return initrd
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition(), creates loader-specific config
+ """
+ isodir = "%s/ISO/" % cr_workdir
+
+ if os.path.exists(isodir):
+ shutil.rmtree(isodir)
+
+ install_cmd = "install -d %s " % isodir
+ exec_cmd(install_cmd)
+
+ # Overwrite the name of the created image
+ logger.debug(source_params)
+ if 'image_name' in source_params and \
+ source_params['image_name'].strip():
+ creator.name = source_params['image_name'].strip()
+ logger.debug("The name of the image is: %s", creator.name)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for a bootable ISO image.
+ """
+
+ isodir = "%s/ISO" % cr_workdir
+
+ if part.rootfs_dir is None:
+ if not 'ROOTFS_DIR' in rootfs_dir:
+ raise WicError("Couldn't find --rootfs-dir, exiting.")
+ rootfs_dir = rootfs_dir['ROOTFS_DIR']
+ else:
+ if part.rootfs_dir in rootfs_dir:
+ rootfs_dir = rootfs_dir[part.rootfs_dir]
+ elif part.rootfs_dir:
+ rootfs_dir = part.rootfs_dir
+ else:
+ raise WicError("Couldn't find --rootfs-dir=%s connection "
+ "or it is not a valid path, exiting." %
+ part.rootfs_dir)
+
+ if not os.path.isdir(rootfs_dir):
+ rootfs_dir = get_bitbake_var("IMAGE_ROOTFS")
+ if not os.path.isdir(rootfs_dir):
+ raise WicError("Couldn't find IMAGE_ROOTFS, exiting.")
+
+ part.rootfs_dir = rootfs_dir
+
+ # Prepare rootfs.img
+ deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ img_iso_dir = get_bitbake_var("ISODIR")
+ rootfs_img = "%s/rootfs.img" % img_iso_dir
+ if not os.path.isfile(rootfs_img):
+ # check if rootfs.img is in deploydir
+ deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ image_name = get_bitbake_var("IMAGE_LINK_NAME")
+ rootfs_img = "%s/%s.%s" \
+ % (deploy_dir, image_name, part.fstype)
+
+ if not os.path.isfile(rootfs_img):
+ # create image file with type specified by --fstype
+ # which contains rootfs
+ du_cmd = "du -bks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ part.size = int(out.split()[0])
+ part.extra_space = 0
+ part.overhead_factor = 1.2
+ part.prepare_rootfs(cr_workdir, oe_builddir, rootfs_dir, \
+ native_sysroot)
+ rootfs_img = part.source_file
+
+ install_cmd = "install -m 0644 %s %s/rootfs.img" \
+ % (rootfs_img, isodir)
+ exec_cmd(install_cmd)
+
+ # Remove the temporary file created by part.prepare_rootfs()
+ if os.path.isfile(part.source_file):
+ os.remove(part.source_file)
+
+ # Support using a different initrd other than default
+ if source_params.get('initrd'):
+ initrd = source_params['initrd']
+ if not deploy_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+ cp_cmd = "cp %s/%s %s" % (deploy_dir, initrd, cr_workdir)
+ exec_cmd(cp_cmd)
+ else:
+ # Prepare initial ramdisk
+ initrd = "%s/initrd" % deploy_dir
+ if not os.path.isfile(initrd):
+ initrd = "%s/initrd" % img_iso_dir
+ if not os.path.isfile(initrd):
+ initrd = cls._build_initramfs_path(rootfs_dir, cr_workdir)
+
+ install_cmd = "install -m 0644 %s %s/initrd" % (initrd, isodir)
+ exec_cmd(install_cmd)
+
+ # Remove the temporary file created by _build_initramfs_path function
+ if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
+ os.remove("%s/initrd.cpio.gz" % cr_workdir)
+
+ # Install bzImage
+ install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
+ (kernel_dir, isodir)
+ exec_cmd(install_cmd)
+
+        # Create bootloader for EFI boot
+ try:
+ target_dir = "%s/EFI/BOOT" % isodir
+ if os.path.exists(target_dir):
+ shutil.rmtree(target_dir)
+
+ os.makedirs(target_dir)
+
+ if source_params['loader'] == 'grub-efi':
+                # Build bootx64.efi/bootia32.efi if ISODIR didn't exist or
+                # didn't contain it
+ target_arch = get_bitbake_var("TARGET_SYS")
+ if not target_arch:
+                    raise WicError("Couldn't find target architecture")
+
+ if re.match("x86_64", target_arch):
+ grub_image = "grub-efi-bootx64.efi"
+ elif re.match('i.86', target_arch):
+ grub_image = "grub-efi-bootia32.efi"
+ else:
+ raise WicError("grub-efi is incompatible with target %s" %
+ target_arch)
+
+ grub_target = os.path.join(target_dir, grub_image)
+ if not os.path.isfile(grub_target):
+ grub_src = os.path.join(deploy_dir, grub_image)
+ if not os.path.exists(grub_src):
+ raise WicError("Grub loader %s is not found in %s. "
+ "Please build grub-efi first" % (grub_image, deploy_dir))
+ shutil.copy(grub_src, grub_target)
+
+                if not os.path.isfile(os.path.join(target_dir, "grub.cfg")):
+ cls.do_configure_grubefi(part, creator, target_dir)
+
+ else:
+                raise WicError("unrecognized isoimage-isohybrid loader: %s" %
+                               source_params['loader'])
+        except KeyError:
+            raise WicError("isoimage-isohybrid requires a loader, none specified")
+
+ # Create efi.img that contains bootloader files for EFI booting
+        # if ISODIR didn't exist or didn't contain it
+ if os.path.isfile("%s/efi.img" % img_iso_dir):
+ install_cmd = "install -m 0644 %s/efi.img %s/efi.img" % \
+ (img_iso_dir, isodir)
+ exec_cmd(install_cmd)
+ else:
+ du_cmd = "du -bks %s/EFI" % isodir
+ out = exec_cmd(du_cmd)
+ blocks = int(out.split()[0])
+ # Add some extra space for file system overhead
+ blocks += 100
+ logger.debug("Added 100 extra blocks to %s to get to %d "
+ "total blocks", part.mountpoint, blocks)
+
+ # dosfs image for EFI boot
+ bootimg = "%s/efi.img" % isodir
+
+ dosfs_cmd = 'mkfs.vfat -n "EFIimg" -S 512 -C %s %d' \
+ % (bootimg, blocks)
+ exec_native_cmd(dosfs_cmd, native_sysroot)
+
+ mmd_cmd = "mmd -i %s ::/EFI" % bootimg
+ exec_native_cmd(mmd_cmd, native_sysroot)
+
+ mcopy_cmd = "mcopy -i %s -s %s/EFI/* ::/EFI/" \
+ % (bootimg, isodir)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
+ chmod_cmd = "chmod 644 %s" % bootimg
+ exec_cmd(chmod_cmd)
+
+ # Prepare files for legacy boot
+ syslinux_dir = get_bitbake_var("STAGING_DATADIR")
+ if not syslinux_dir:
+ raise WicError("Couldn't find STAGING_DATADIR, exiting.")
+
+ if os.path.exists("%s/isolinux" % isodir):
+ shutil.rmtree("%s/isolinux" % isodir)
+
+ install_cmd = "install -d %s/isolinux" % isodir
+ exec_cmd(install_cmd)
+
+ cls.do_configure_syslinux(creator, cr_workdir)
+
+ install_cmd = "install -m 444 %s/syslinux/ldlinux.sys " % syslinux_dir
+ install_cmd += "%s/isolinux/ldlinux.sys" % isodir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -m 444 %s/syslinux/isohdpfx.bin " % syslinux_dir
+ install_cmd += "%s/isolinux/isohdpfx.bin" % isodir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -m 644 %s/syslinux/isolinux.bin " % syslinux_dir
+ install_cmd += "%s/isolinux/isolinux.bin" % isodir
+ exec_cmd(install_cmd)
+
+ install_cmd = "install -m 644 %s/syslinux/ldlinux.c32 " % syslinux_dir
+ install_cmd += "%s/isolinux/ldlinux.c32" % isodir
+ exec_cmd(install_cmd)
+
+        # Create ISO image
+ iso_img = "%s/tempiso_img.iso" % cr_workdir
+ iso_bootimg = "isolinux/isolinux.bin"
+ iso_bootcat = "isolinux/boot.cat"
+ efi_img = "efi.img"
+
+ mkisofs_cmd = "mkisofs -V %s " % part.label
+ mkisofs_cmd += "-o %s -U " % iso_img
+ mkisofs_cmd += "-J -joliet-long -r -iso-level 2 -b %s " % iso_bootimg
+ mkisofs_cmd += "-c %s -no-emul-boot -boot-load-size 4 " % iso_bootcat
+ mkisofs_cmd += "-boot-info-table -eltorito-alt-boot "
+ mkisofs_cmd += "-eltorito-platform 0xEF -eltorito-boot %s " % efi_img
+ mkisofs_cmd += "-no-emul-boot %s " % isodir
+
+ logger.debug("running command: %s", mkisofs_cmd)
+ exec_native_cmd(mkisofs_cmd, native_sysroot)
+
+ shutil.rmtree(isodir)
+
+ du_cmd = "du -Lbks %s" % iso_img
+ out = exec_cmd(du_cmd)
+ isoimg_size = int(out.split()[0])
+
+ part.size = isoimg_size
+ part.source_file = iso_img
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image. In this case, we insert/modify the MBR using isohybrid
+ utility for booting via BIOS from disk storage devices.
+ """
+
+ iso_img = "%s.p1" % disk.path
+ full_path = creator._full_path(workdir, disk_name, "direct")
+ full_path_iso = creator._full_path(workdir, disk_name, "iso")
+
+ isohybrid_cmd = "isohybrid -u %s" % iso_img
+ logger.debug("running command: %s", isohybrid_cmd)
+ exec_native_cmd(isohybrid_cmd, native_sysroot)
+
+        # Replace the image created by the direct plugin with the one created
+        # by the mkisofs command. This is necessary because the ISO image
+        # created by mkisofs has a very specific MBR in the system area of the
+        # ISO image, while the direct plugin adds and configures another MBR.
+        logger.debug("Replacing the image created by direct plugin\n")
+ os.remove(disk.path)
+ shutil.copy2(iso_img, full_path_iso)
+ shutil.copy2(full_path_iso, full_path)
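
Assuming the canned mkhybridiso.wks shipped with wic (which selects this plugin) is present, the hybrid ISO can then be produced with something like:

    wic create mkhybridiso -e core-image-minimal
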
diff --git a/poky/scripts/lib/wic/plugins/source/rawcopy.py b/poky/scripts/lib/wic/plugins/source/rawcopy.py
new file mode 100644
index 000000000..e86398ac8
--- /dev/null
+++ b/poky/scripts/lib/wic/plugins/source/rawcopy.py
@@ -0,0 +1,91 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import logging
+import os
+
+from wic import WicError
+from wic.pluginbase import SourcePlugin
+from wic.misc import exec_cmd, get_bitbake_var
+from wic.filemap import sparse_copy
+
+logger = logging.getLogger('wic')
+
+class RawCopyPlugin(SourcePlugin):
+ """
+ Populate partition content from raw image file.
+ """
+
+ name = 'rawcopy'
+
+ @staticmethod
+ def do_image_label(fstype, dst, label):
+ if fstype.startswith('ext'):
+ cmd = 'tune2fs -L %s %s' % (label, dst)
+ elif fstype in ('msdos', 'vfat'):
+ cmd = 'dosfslabel %s %s' % (dst, label)
+ elif fstype == 'btrfs':
+ cmd = 'btrfs filesystem label %s %s' % (dst, label)
+ elif fstype == 'swap':
+ cmd = 'mkswap -L %s %s' % (label, dst)
+ elif fstype == 'squashfs':
+ raise WicError("It's not possible to update a squashfs "
+ "filesystem label '%s'" % (label))
+ else:
+ raise WicError("Cannot update filesystem label: "
+ "Unknown fstype: '%s'" % (fstype))
+
+ exec_cmd(cmd)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ logger.debug('Kernel dir: %s', kernel_dir)
+
+ if 'file' not in source_params:
+ raise WicError("No file specified")
+
+ src = os.path.join(kernel_dir, source_params['file'])
+ dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
+
+ if 'skip' in source_params:
+ sparse_copy(src, dst, skip=int(source_params['skip']))
+ else:
+ sparse_copy(src, dst)
+
+ # get the size in the right units for kickstart (kB)
+ du_cmd = "du -Lbks %s" % dst
+ out = exec_cmd(du_cmd)
+ filesize = int(out.split()[0])
+
+ if filesize > part.size:
+ part.size = filesize
+
+ if part.label:
+ RawCopyPlugin.do_image_label(part.fstype, dst, part.label)
+
+ part.source_file = dst
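
A hypothetical .wks line for this plugin; the file name and skip value are illustrative, with 'skip' passed to sparse_copy() as a byte offset into the source file:

    part /data --source rawcopy --sourceparams="file=data.ext4,skip=512" --ondisk sda --fstype=ext4
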
diff --git a/poky/scripts/lib/wic/plugins/source/rootfs.py b/poky/scripts/lib/wic/plugins/source/rootfs.py
new file mode 100644
index 000000000..aec720fb2
--- /dev/null
+++ b/poky/scripts/lib/wic/plugins/source/rootfs.py
@@ -0,0 +1,126 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2014, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'rootfs' source plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+# Joao Henrique Ferreira de Freitas <joaohf (at] gmail.com>
+#
+
+import logging
+import os
+import shutil
+import sys
+
+from oe.path import copyhardlinktree
+
+from wic import WicError
+from wic.pluginbase import SourcePlugin
+from wic.misc import get_bitbake_var
+
+logger = logging.getLogger('wic')
+
+class RootfsPlugin(SourcePlugin):
+ """
+ Populate partition content from a rootfs directory.
+ """
+
+ name = 'rootfs'
+
+ @staticmethod
+ def __get_rootfs_dir(rootfs_dir):
+ if os.path.isdir(rootfs_dir):
+ return os.path.realpath(rootfs_dir)
+
+ image_rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
+ if not os.path.isdir(image_rootfs_dir):
+ raise WicError("No valid artifact IMAGE_ROOTFS from image "
+ "named %s has been found at %s, exiting." %
+ (rootfs_dir, image_rootfs_dir))
+
+ return os.path.realpath(image_rootfs_dir)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ krootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, prepare content for legacy bios boot partition.
+ """
+ if part.rootfs_dir is None:
+ if not 'ROOTFS_DIR' in krootfs_dir:
+ raise WicError("Couldn't find --rootfs-dir, exiting")
+
+ rootfs_dir = krootfs_dir['ROOTFS_DIR']
+ else:
+ if part.rootfs_dir in krootfs_dir:
+ rootfs_dir = krootfs_dir[part.rootfs_dir]
+ elif part.rootfs_dir:
+ rootfs_dir = part.rootfs_dir
+ else:
+ raise WicError("Couldn't find --rootfs-dir=%s connection or "
+ "it is not a valid path, exiting" % part.rootfs_dir)
+
+ part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+
+ new_rootfs = None
+ # Handle excluded paths.
+ if part.exclude_path is not None:
+ # We need a new rootfs directory we can delete files from. Copy to
+ # workdir.
+ new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))
+
+ if os.path.lexists(new_rootfs):
+                shutil.rmtree(new_rootfs)
+
+ copyhardlinktree(part.rootfs_dir, new_rootfs)
+
+ for orig_path in part.exclude_path:
+ path = orig_path
+ if os.path.isabs(path):
+ logger.error("Must be relative: --exclude-path=%s" % orig_path)
+ sys.exit(1)
+
+ full_path = os.path.realpath(os.path.join(new_rootfs, path))
+
+ # Disallow climbing outside of parent directory using '..',
+ # because doing so could be quite disastrous (we will delete the
+ # directory).
+ if not full_path.startswith(new_rootfs):
+ logger.error("'%s' points to a path outside the rootfs" % orig_path)
+ sys.exit(1)
+
+ if path.endswith(os.sep):
+ # Delete content only.
+ for entry in os.listdir(full_path):
+ full_entry = os.path.join(full_path, entry)
+ if os.path.isdir(full_entry) and not os.path.islink(full_entry):
+ shutil.rmtree(full_entry)
+ else:
+ os.remove(full_entry)
+ else:
+ # Delete whole directory.
+ shutil.rmtree(full_path)
+
+ part.prepare_rootfs(cr_workdir, oe_builddir,
+ new_rootfs or part.rootfs_dir, native_sysroot)
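
A hypothetical .wks line exercising the --exclude-path handling above (paths illustrative); per the code, a trailing slash deletes only the directory's contents, while a bare path removes the directory itself:

    part / --source rootfs --ondisk sda --fstype=ext4 --exclude-path=usr/share/doc/ --align 1024
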
diff --git a/poky/scripts/lnr b/poky/scripts/lnr
new file mode 100755
index 000000000..5fed780eb
--- /dev/null
+++ b/poky/scripts/lnr
@@ -0,0 +1,21 @@
+#! /usr/bin/env python3
+
+# Create a *relative* symlink, just like ln --relative does but without needing
+# coreutils 8.16.
+
+import sys, os
+
+if len(sys.argv) != 3:
+ print("$ lnr TARGET LINK_NAME")
+ sys.exit(1)
+
+target = sys.argv[1]
+linkname = sys.argv[2]
+
+if os.path.isabs(target):
+ if not os.path.isabs(linkname):
+ linkname = os.path.abspath(linkname)
+ start = os.path.dirname(linkname)
+ target = os.path.relpath(target, start)
+
+os.symlink(target, linkname)
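
A quick illustration of the relative-link behavior (paths hypothetical):

    $ lnr /srv/data/file.txt /srv/links/file.txt
    $ readlink /srv/links/file.txt
    ../data/file.txt
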
diff --git a/poky/scripts/multilib_header_wrapper.h b/poky/scripts/multilib_header_wrapper.h
new file mode 100644
index 000000000..f516673b6
--- /dev/null
+++ b/poky/scripts/multilib_header_wrapper.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2005-2011 by Wind River Systems, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+
+#if defined (__arm__)
+#define __MHWORDSIZE 32
+#elif defined (__aarch64__) && defined ( __LP64__)
+#define __MHWORDSIZE 64
+#elif defined (__aarch64__)
+#define __MHWORDSIZE 32
+#else
+#include <bits/wordsize.h>
+#if defined (__WORDSIZE)
+#define __MHWORDSIZE __WORDSIZE
+#else
+#error "__WORDSIZE is not defined"
+#endif
+#endif
+
+#if __MHWORDSIZE == 32
+
+#ifdef _MIPS_SIM
+
+#if _MIPS_SIM == _ABIO32
+#include <ENTER_HEADER_FILENAME_HERE-32.h>
+#elif _MIPS_SIM == _ABIN32
+#include <ENTER_HEADER_FILENAME_HERE-n32.h>
+#else
+#error "Unknown _MIPS_SIM"
+#endif
+
+#else /* _MIPS_SIM is not defined */
+#include <ENTER_HEADER_FILENAME_HERE-32.h>
+#endif
+
+#elif __MHWORDSIZE == 64
+#include <ENTER_HEADER_FILENAME_HERE-64.h>
+#else
+#error "Unknown __WORDSIZE detected"
+#endif /* matches #if __WORDSIZE == 32 */
+
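This header is a template rather than a finished file: a packaging step copies it over a header that differs between multilib variants and substitutes ENTER_HEADER_FILENAME_HERE with the real header's stem (in OE-core this is done by the oe_multilib_header function via sed). A sketch of the substitution, for illustration only:

    def instantiate_wrapper(template_text, header_stem):
        # Turns '#include <ENTER_HEADER_FILENAME_HERE-64.h>' into,
        # e.g., '#include <unistd-64.h>' for header_stem 'unistd'.
        return template_text.replace('ENTER_HEADER_FILENAME_HERE', header_stem)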
diff --git a/poky/scripts/native-intercept/chown b/poky/scripts/native-intercept/chown
new file mode 100755
index 000000000..4f43271c2
--- /dev/null
+++ b/poky/scripts/native-intercept/chown
@@ -0,0 +1,2 @@
+#! /bin/sh
+echo "Intercept $0: $@ -- do nothing"
diff --git a/poky/scripts/oe-build-perf-report b/poky/scripts/oe-build-perf-report
new file mode 100755
index 000000000..dc999c45c
--- /dev/null
+++ b/poky/scripts/oe-build-perf-report
@@ -0,0 +1,661 @@
+#!/usr/bin/python3
+#
+# Examine build performance test results
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import argparse
+import json
+import logging
+import os
+import re
+import sys
+from collections import namedtuple, OrderedDict
+from operator import attrgetter
+from xml.etree import ElementTree as ET
+
+# Import oe libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+import scriptpath
+from build_perf import print_table
+from build_perf.report import (metadata_xml_to_json, results_xml_to_json,
+ aggregate_data, aggregate_metadata, measurement_stats,
+ AggregateTestData)
+from build_perf import html
+from buildstats import BuildStats, diff_buildstats, BSVerDiff
+
+scriptpath.add_oe_lib_path()
+
+from oeqa.utils.git import GitRepo, GitError
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger('oe-build-perf-report')
+
+
+# Container class for tested revisions
+TestedRev = namedtuple('TestedRev', 'commit commit_number tags')
+
+
+def get_test_runs(repo, tag_name, **kwargs):
+ """Get a sorted list of test runs, matching given pattern"""
+ # First, get field names from the tag name pattern
+ field_names = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]
+ undef_fields = [f for f in field_names if f not in kwargs.keys()]
+
+ # Fields for formatting tag name pattern
+ str_fields = dict([(f, '*') for f in field_names])
+ str_fields.update(kwargs)
+
+ # Get a list of all matching tags
+ tag_pattern = tag_name.format(**str_fields)
+ tags = repo.run_cmd(['tag', '-l', tag_pattern]).splitlines()
+ log.debug("Found %d tags matching pattern '%s'", len(tags), tag_pattern)
+
+ # Parse undefined fields from tag names
+ str_fields = dict([(f, r'(?P<{}>[\w\-.()]+)'.format(f)) for f in field_names])
+ str_fields['branch'] = r'(?P<branch>[\w\-.()/]+)'
+ str_fields['commit'] = '(?P<commit>[0-9a-f]{7,40})'
+ str_fields['commit_number'] = '(?P<commit_number>[0-9]{1,7})'
+ str_fields['tag_number'] = '(?P<tag_number>[0-9]{1,5})'
+ # escape parentheses in the fields so they don't mess up the regexp
+ fixed_fields = dict([(k, v.replace('(', r'\(').replace(')', r'\)')) for k, v in kwargs.items()])
+ str_fields.update(fixed_fields)
+ tag_re = re.compile(tag_name.format(**str_fields))
+
+ # Parse fields from tags
+ revs = []
+ for tag in tags:
+ m = tag_re.match(tag)
+ groups = m.groupdict()
+ revs.append([groups[f] for f in undef_fields] + [tag])
+
+ # Return field names and a sorted list of revs
+ return undef_fields, sorted(revs)
+
+def list_test_revs(repo, tag_name, verbosity, **kwargs):
+ """Get list of all tested revisions"""
+ valid_kwargs = dict([(k, v) for k, v in kwargs.items() if v is not None])
+
+ fields, revs = get_test_runs(repo, tag_name, **valid_kwargs)
+ ignore_fields = ['tag_number']
+ if verbosity < 2:
+ extra_fields = ['COMMITS', 'TEST RUNS']
+ ignore_fields.extend(['commit_number', 'commit'])
+ else:
+ extra_fields = ['TEST RUNS']
+
+ print_fields = [i for i, f in enumerate(fields) if f not in ignore_fields]
+
+ # Header row
+ rows = [[fields[i].upper() for i in print_fields] + extra_fields]
+
+ prev = [''] * len(print_fields)
+ prev_commit = None
+ commit_cnt = 0
+ commit_field = fields.index('commit')
+ for rev in revs:
+ # Only use fields that we want to print
+ cols = [rev[i] for i in print_fields]
+
+ if cols != prev:
+ commit_cnt = 1
+ test_run_cnt = 1
+ new_row = [''] * (len(print_fields) + len(extra_fields))
+
+ for i in range(len(cols)):
+ if cols[i] != prev[i]:
+ break
+ new_row[i:-len(extra_fields)] = cols[i:]
+ rows.append(new_row)
+ else:
+ if rev[commit_field] != prev_commit:
+ commit_cnt += 1
+ test_run_cnt += 1
+
+ if verbosity < 2:
+ new_row[-2] = commit_cnt
+ new_row[-1] = test_run_cnt
+ prev = cols
+ prev_commit = rev[commit_field]
+
+ print_table(rows)
+
+def get_test_revs(repo, tag_name, **kwargs):
+ """Get list of all tested revisions"""
+ fields, runs = get_test_runs(repo, tag_name, **kwargs)
+
+ revs = {}
+ commit_i = fields.index('commit')
+ commit_num_i = fields.index('commit_number')
+ for run in runs:
+ commit = run[commit_i]
+ commit_num = run[commit_num_i]
+ tag = run[-1]
+ if commit not in revs:
+ revs[commit] = TestedRev(commit, commit_num, [tag])
+ else:
+ assert commit_num == revs[commit].commit_number, "Commit numbers do not match"
+ revs[commit].tags.append(tag)
+
+ # Return in sorted table
+ revs = sorted(revs.values(), key=attrgetter('commit_number'))
+ log.debug("Found %d tested revisions:\n %s", len(revs),
+ "\n ".join(['{} ({})'.format(rev.commit_number, rev.commit) for rev in revs]))
+ return revs
+
+def rev_find(revs, attr, val):
+ """Search from a list of TestedRev"""
+ for i, rev in enumerate(revs):
+ if getattr(rev, attr) == val:
+ return i
+ raise ValueError("Unable to find '{}' value '{}'".format(attr, val))
+
+def is_xml_format(repo, commit):
+ """Check if the commit contains xml (or json) data"""
+ if repo.rev_parse(commit + ':results.xml'):
+ log.debug("Detected report in xml format in %s", commit)
+ return True
+ else:
+ log.debug("No xml report in %s, assuming json formatted results", commit)
+ return False
+
+def read_results(repo, tags, xml=True):
+ """Read result files from repo"""
+
+ def parse_xml_stream(data):
+ """Parse multiple concatenated XML objects"""
+ objs = []
+ xml_d = ""
+ for line in data.splitlines():
+ if xml_d and line.startswith('<?xml version='):
+ objs.append(ET.fromstring(xml_d))
+ xml_d = line
+ else:
+ xml_d += line
+ objs.append(ET.fromstring(xml_d))
+ return objs
+
+ def parse_json_stream(data):
+ """Parse multiple concatenated JSON objects"""
+ objs = []
+ json_d = ""
+ for line in data.splitlines():
+ if line == '}{':
+ json_d += '}'
+ objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
+ json_d = '{'
+ else:
+ json_d += line
+ objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
+ return objs
+
+ num_revs = len(tags)
+
+ # Optimize by reading all data with one git command
+ log.debug("Loading raw result data from %d tags, %s...", num_revs, tags[0])
+ if xml:
+ git_objs = [tag + ':metadata.xml' for tag in tags] + [tag + ':results.xml' for tag in tags]
+ data = parse_xml_stream(repo.run_cmd(['show'] + git_objs + ['--']))
+ return ([metadata_xml_to_json(e) for e in data[0:num_revs]],
+ [results_xml_to_json(e) for e in data[num_revs:]])
+ else:
+ git_objs = [tag + ':metadata.json' for tag in tags] + [tag + ':results.json' for tag in tags]
+ data = parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--']))
+ return data[0:num_revs], data[num_revs:]
+
+
+def get_data_item(data, key):
+ """Nested getitem lookup"""
+ for k in key.split('.'):
+ data = data[k]
+ return data
+
+
+def metadata_diff(metadata_l, metadata_r):
+ """Prepare a metadata diff for printing"""
+ keys = [('Hostname', 'hostname', 'hostname'),
+ ('Branch', 'branch', 'layers.meta.branch'),
+ ('Commit number', 'commit_num', 'layers.meta.commit_count'),
+ ('Commit', 'commit', 'layers.meta.commit'),
+ ('Number of test runs', 'testrun_count', 'testrun_count')
+ ]
+
+ def _metadata_diff(key):
+ """Diff metadata from two test reports"""
+ try:
+ val1 = get_data_item(metadata_l, key)
+ except KeyError:
+ val1 = '(N/A)'
+ try:
+ val2 = get_data_item(metadata_r, key)
+ except KeyError:
+ val2 = '(N/A)'
+ return val1, val2
+
+ metadata = OrderedDict()
+ for title, key, key_json in keys:
+ value_l, value_r = _metadata_diff(key_json)
+ metadata[key] = {'title': title,
+ 'value_old': value_l,
+ 'value': value_r}
+ return metadata
+
+
+def print_diff_report(metadata_l, data_l, metadata_r, data_r):
+ """Print differences between two data sets"""
+
+ # First, print general metadata
+ print("\nTEST METADATA:\n==============")
+ meta_diff = metadata_diff(metadata_l, metadata_r)
+ rows = []
+ row_fmt = ['{:{wid}} ', '{:<{wid}} ', '{:<{wid}}']
+ rows = [['', 'CURRENT COMMIT', 'COMPARING WITH']]
+ for key, val in meta_diff.items():
+ # Shorten commit hashes
+ if key == 'commit':
+ rows.append([val['title'] + ':', val['value'][:20], val['value_old'][:20]])
+ else:
+ rows.append([val['title'] + ':', val['value'], val['value_old']])
+ print_table(rows, row_fmt)
+
+ # Print test results
+ print("\nTEST RESULTS:\n=============")
+
+ tests = list(data_l['tests'].keys())
+ # Append tests that are only present in 'right' set
+ tests += [t for t in list(data_r['tests'].keys()) if t not in tests]
+
+ # Prepare data to be printed
+ rows = []
+ row_fmt = ['{:8}', '{:{wid}}', '{:{wid}}', ' {:>{wid}}', ' {:{wid}} ', '{:{wid}}',
+ ' {:>{wid}}', ' {:>{wid}}']
+ num_cols = len(row_fmt)
+ for test in tests:
+ test_l = data_l['tests'][test] if test in data_l['tests'] else None
+ test_r = data_r['tests'][test] if test in data_r['tests'] else None
+ pref = ' '
+ if test_l is None:
+ pref = '+'
+ elif test_r is None:
+ pref = '-'
+ descr = test_l['description'] if test_l else test_r['description']
+ heading = "{} {}: {}".format(pref, test, descr)
+
+ rows.append([heading])
+
+ # Generate the list of measurements
+ meas_l = test_l['measurements'] if test_l else {}
+ meas_r = test_r['measurements'] if test_r else {}
+ measurements = list(meas_l.keys())
+ measurements += [m for m in list(meas_r.keys()) if m not in measurements]
+
+ for meas in measurements:
+ m_pref = ' '
+ if meas in meas_l:
+ stats_l = measurement_stats(meas_l[meas], 'l.')
+ else:
+ stats_l = measurement_stats(None, 'l.')
+ m_pref = '+'
+ if meas in meas_r:
+ stats_r = measurement_stats(meas_r[meas], 'r.')
+ else:
+ stats_r = measurement_stats(None, 'r.')
+ m_pref = '-'
+ stats = stats_l.copy()
+ stats.update(stats_r)
+
+ absdiff = stats['val_cls'](stats['r.mean'] - stats['l.mean'])
+ reldiff = "{:+.1f} %".format(absdiff * 100 / stats['l.mean'])
+ if stats['r.mean'] > stats['l.mean']:
+ absdiff = '+' + str(absdiff)
+ else:
+ absdiff = str(absdiff)
+ rows.append(['', m_pref, stats['name'] + ' ' + stats['quantity'],
+ str(stats['l.mean']), '->', str(stats['r.mean']),
+ absdiff, reldiff])
+ rows.append([''] * num_cols)
+
+ print_table(rows, row_fmt)
+
+ print()
+
+
+class BSSummary(object):
+ def __init__(self, bs1, bs2):
+ self.tasks = {'count': bs2.num_tasks,
+ 'change': '{:+d}'.format(bs2.num_tasks - bs1.num_tasks)}
+ self.top_consumer = None
+ self.top_decrease = None
+ self.top_increase = None
+ self.ver_diff = OrderedDict()
+
+ tasks_diff = diff_buildstats(bs1, bs2, 'cputime')
+
+ # Get top consumers of resources
+ tasks_diff = sorted(tasks_diff, key=attrgetter('value2'))
+ self.top_consumer = tasks_diff[-5:]
+
+ # Get biggest increase and decrease in resource usage
+ tasks_diff = sorted(tasks_diff, key=attrgetter('absdiff'))
+ self.top_decrease = tasks_diff[0:5]
+ self.top_increase = tasks_diff[-5:]
+
+ # Compare recipe versions and prepare data for display
+ ver_diff = BSVerDiff(bs1, bs2)
+ if ver_diff:
+ if ver_diff.new:
+ self.ver_diff['New recipes'] = [(n, r.evr) for n, r in ver_diff.new.items()]
+ if ver_diff.dropped:
+ self.ver_diff['Dropped recipes'] = [(n, r.evr) for n, r in ver_diff.dropped.items()]
+ if ver_diff.echanged:
+ self.ver_diff['Epoch changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.echanged.items()]
+ if ver_diff.vchanged:
+ self.ver_diff['Version changed'] = [(n, "{} &rarr; {}".format(r.left.version, r.right.version)) for n, r in ver_diff.vchanged.items()]
+ if ver_diff.rchanged:
+ self.ver_diff['Revision changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.rchanged.items()]
+
+
+def print_html_report(data, id_comp, buildstats):
+ """Print report in html format"""
+ # Handle metadata
+ metadata = metadata_diff(data[id_comp].metadata, data[-1].metadata)
+
+ # Generate list of tests
+ tests = []
+ for test in data[-1].results['tests'].keys():
+ test_r = data[-1].results['tests'][test]
+ new_test = {'name': test_r['name'],
+ 'description': test_r['description'],
+ 'status': test_r['status'],
+ 'measurements': [],
+ 'err_type': test_r.get('err_type'),
+ }
+ # Limit length of err output shown
+ if 'message' in test_r:
+ lines = test_r['message'].splitlines()
+ if len(lines) > 20:
+ new_test['message'] = '...\n' + '\n'.join(lines[-20:])
+ else:
+ new_test['message'] = test_r['message']
+
+ # Generate the list of measurements
+ for meas in test_r['measurements'].keys():
+ meas_r = test_r['measurements'][meas]
+ meas_type = 'time' if meas_r['type'] == 'sysres' else 'size'
+ new_meas = {'name': meas_r['name'],
+ 'legend': meas_r['legend'],
+ 'description': meas_r['name'] + ' ' + meas_type,
+ }
+ samples = []
+
+ # Run through all revisions in our data
+ for meta, test_data in data:
+ if (test not in test_data['tests'] or
+ meas not in test_data['tests'][test]['measurements']):
+ samples.append(measurement_stats(None))
+ continue
+ test_i = test_data['tests'][test]
+ meas_i = test_i['measurements'][meas]
+ commit_num = get_data_item(meta, 'layers.meta.commit_count')
+ samples.append(measurement_stats(meas_i))
+ samples[-1]['commit_num'] = commit_num
+
+ absdiff = samples[-1]['val_cls'](samples[-1]['mean'] - samples[id_comp]['mean'])
+ new_meas['absdiff'] = absdiff
+ new_meas['absdiff_str'] = str(absdiff) if absdiff < 0 else '+' + str(absdiff)
+ new_meas['reldiff'] = "{:+.1f} %".format(absdiff * 100 / samples[id_comp]['mean'])
+ new_meas['samples'] = samples
+ new_meas['value'] = samples[-1]
+ new_meas['value_type'] = samples[-1]['val_cls']
+
+ # Compare buildstats
+ bs_key = test + '.' + meas
+ rev = metadata['commit_num']['value']
+ comp_rev = metadata['commit_num']['value_old']
+ if (rev in buildstats and bs_key in buildstats[rev] and
+ comp_rev in buildstats and bs_key in buildstats[comp_rev]):
+ new_meas['buildstats'] = BSSummary(buildstats[comp_rev][bs_key],
+ buildstats[rev][bs_key])
+
+ new_test['measurements'].append(new_meas)
+ tests.append(new_test)
+
+ # Chart options
+ chart_opts = {'haxis': {'min': get_data_item(data[0][0], 'layers.meta.commit_count'),
+ 'max': get_data_item(data[-1][0], 'layers.meta.commit_count')}
+ }
+
+ print(html.template.render(title="Build Perf Test Report",
+ metadata=metadata, test_data=tests,
+ chart_opts=chart_opts))
+
+
+def get_buildstats(repo, notes_ref, revs, outdir=None):
+ """Get the buildstats from git notes"""
+ full_ref = 'refs/notes/' + notes_ref
+ if not repo.rev_parse(full_ref):
+ log.error("No buildstats found, please try running "
+ "'git fetch origin %s:%s' to fetch them from the remote",
+ full_ref, full_ref)
+ return
+
+ missing = False
+ buildstats = {}
+ log.info("Parsing buildstats from 'refs/notes/%s'", notes_ref)
+ for rev in revs:
+ buildstats[rev.commit_number] = {}
+ log.debug('Dumping buildstats for %s (%s)', rev.commit_number,
+ rev.commit)
+ for tag in rev.tags:
+ log.debug(' %s', tag)
+ try:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref,
+ 'show', tag + '^0']))
+ except GitError:
+ log.warning("Buildstats not found for %s", tag)
+ bs_all = {}
+ missing = True
+
+ for measurement, bs in bs_all.items():
+ # Write out onto disk
+ if outdir:
+ tag_base, run_id = tag.rsplit('/', 1)
+ tag_base = tag_base.replace('/', '_')
+ bs_dir = os.path.join(outdir, measurement, tag_base)
+ if not os.path.exists(bs_dir):
+ os.makedirs(bs_dir)
+ with open(os.path.join(bs_dir, run_id + '.json'), 'w') as f:
+ json.dump(bs, f, indent=2)
+
+ # Read buildstats into a dict
+ _bs = BuildStats.from_json(bs)
+ if measurement not in buildstats[rev.commit_number]:
+ buildstats[rev.commit_number][measurement] = _bs
+ else:
+ buildstats[rev.commit_number][measurement].aggregate(_bs)
+
+ if missing:
+ log.info("Buildstats were missing for some test runs, please "
+ "run 'git fetch origin %s:%s' and try again",
+ full_ref, full_ref)
+
+ return buildstats
+
+
+def auto_args(repo, args):
+ """Guess arguments, if not defined by the user"""
+ # Get the latest commit in the repo
+ log.debug("Guessing arguments from the latest commit")
+ msg = repo.run_cmd(['log', '-1', '--branches', '--remotes', '--format=%b'])
+ for line in msg.splitlines():
+ split = line.split(':', 1)
+ if len(split) != 2:
+ continue
+
+ key = split[0]
+ val = split[1].strip()
+ if key == 'hostname':
+ log.debug("Using hostname %s", val)
+ args.hostname = val
+ elif key == 'branch':
+ log.debug("Using branch %s", val)
+ args.branch = val
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ description = """
+Examine build performance test results from a Git repository"""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ description=description)
+
+ parser.add_argument('--debug', '-d', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--repo', '-r', required=True,
+ help="Results repository (local git clone)")
+ parser.add_argument('--list', '-l', action='count',
+ help="List available test runs")
+ parser.add_argument('--html', action='store_true',
+ help="Generate report in html format")
+ group = parser.add_argument_group('Tag and revision')
+ group.add_argument('--tag-name', '-t',
+ default='{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}',
+ help="Tag name (pattern) for finding results")
+ group.add_argument('--hostname', '-H')
+ group.add_argument('--branch', '-B', default='master')
+ group.add_argument('--machine', default='qemux86')
+ group.add_argument('--history-length', default=25, type=int,
+ help="Number of tested revisions to plot in html report")
+ group.add_argument('--commit',
+ help="Revision to search for")
+ group.add_argument('--commit-number',
+ help="Revision number to search for, redundant if "
+ "--commit is specified")
+ group.add_argument('--commit2',
+ help="Revision to compare with")
+ group.add_argument('--commit-number2',
+ help="Revision number to compare with, redundant if "
+ "--commit2 is specified")
+ parser.add_argument('--dump-buildstats', nargs='?', const='.',
+ help="Dump buildstats of the tests")
+
+ return parser.parse_args(argv)
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ repo = GitRepo(args.repo)
+
+ if args.list:
+ list_test_revs(repo, args.tag_name, args.list, hostname=args.hostname)
+ return 0
+
+ # Determine which hostname to use
+ if not args.hostname:
+ auto_args(repo, args)
+
+ revs = get_test_revs(repo, args.tag_name, hostname=args.hostname,
+ branch=args.branch, machine=args.machine)
+ if len(revs) < 2:
+ log.error("%d tester revisions found, unable to generate report",
+ len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ log.warning("Ignoring --commit-number as --commit was specified")
+ index1 = rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.commit2:
+ if args.commit_number2:
+ log.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+ else:
+ log.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ index_l = min(index1, index2)
+ index_r = max(index1, index2)
+
+ rev_l = revs[index_l]
+ rev_r = revs[index_r]
+ log.debug("Using 'left' revision %s (%s), %s test runs:\n %s",
+ rev_l.commit_number, rev_l.commit, len(rev_l.tags),
+ '\n '.join(rev_l.tags))
+ log.debug("Using 'right' revision %s (%s), %s test runs:\n %s",
+ rev_r.commit_number, rev_r.commit, len(rev_r.tags),
+ '\n '.join(rev_r.tags))
+
+ # Check report format used in the repo (assume all reports in the same fmt)
+ xml = is_xml_format(repo, revs[index_r].tags[-1])
+
+ if args.html:
+ index_0 = max(0, min(index_l, index_r - args.history_length))
+ rev_range = range(index_0, index_r + 1)
+ else:
+ # We do not need range of commits for text report (no graphs)
+ index_0 = index_l
+ rev_range = (index_l, index_r)
+
+ # Read raw data
+ log.debug("Reading %d revisions, starting from %s (%s)",
+ len(rev_range), revs[index_0].commit_number, revs[index_0].commit)
+ raw_data = [read_results(repo, revs[i].tags, xml) for i in rev_range]
+
+ data = []
+ for raw_m, raw_d in raw_data:
+ data.append(AggregateTestData(aggregate_metadata(raw_m),
+ aggregate_data(raw_d)))
+
+ # Read buildstats only when needed
+ buildstats = None
+ if args.dump_buildstats or args.html:
+ outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch,
+ args.machine)
+ buildstats = get_buildstats(repo, notes_ref, [rev_l, rev_r], outdir)
+
+ # Print report
+ if not args.html:
+ print_diff_report(data[0].metadata, data[0].results,
+ data[1].metadata, data[1].results)
+ else:
+ # Re-map 'left' list index to the data table where index_0 maps to 0
+ print_html_report(data, index_l - index_0, buildstats)
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
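read_results() above leans on reading every blob with a single 'git show' and then splitting the concatenated stream back into documents. The JSON variant in isolation, run on a fabricated two-object stream:

    import json
    from collections import OrderedDict

    def parse_json_stream(data):
        """Split concatenated JSON objects on the '}{' boundary line."""
        objs, json_d = [], ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
        return objs

    stream = '{\n"a": 1\n}{\n"b": 2\n}'
    print(parse_json_stream(stream))  # two OrderedDicts: {'a': 1}, {'b': 2}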
diff --git a/poky/scripts/oe-build-perf-test b/poky/scripts/oe-build-perf-test
new file mode 100755
index 000000000..669470fa9
--- /dev/null
+++ b/poky/scripts/oe-build-perf-test
@@ -0,0 +1,223 @@
+#!/usr/bin/python3
+#
+# Build performance test script
+#
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Build performance test script"""
+import argparse
+import errno
+import fcntl
+import json
+import logging
+import os
+import re
+import shutil
+import sys
+from datetime import datetime
+
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
+import scriptpath
+scriptpath.add_oe_lib_path()
+scriptpath.add_bitbake_lib_path()
+import oeqa.buildperf
+from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
+ BuildPerfTestRunner, KernelDropCaches)
+from oeqa.utils.commands import runCmd
+from oeqa.utils.metadata import metadata_from_bb, write_metadata_file
+
+
+# Set-up logging
+LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'
+logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
+ datefmt='%Y-%m-%d %H:%M:%S')
+log = logging.getLogger()
+
+
+def acquire_lock(lock_f):
+ """Acquire flock on file"""
+ log.debug("Acquiring lock %s", os.path.abspath(lock_f.name))
+ try:
+ fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError as err:
+ if err.errno == errno.EAGAIN:
+ return False
+ raise
+ log.debug("Lock acquired")
+ return True
+
+
+def pre_run_sanity_check():
+ """Sanity check of build environment"""
+ build_dir = os.environ.get("BUILDDIR")
+ if not build_dir:
+ log.error("BUILDDIR not set. Please run the build environmnent setup "
+ "script.")
+ return False
+ if os.getcwd() != build_dir:
+ log.error("Please run this script under BUILDDIR (%s)", build_dir)
+ return False
+
+ ret = runCmd('which bitbake', ignore_status=True)
+ if ret.status:
+ log.error("bitbake command not found")
+ return False
+ return True
+
+def setup_file_logging(log_file):
+ """Setup loggin to file"""
+ log_dir = os.path.dirname(log_file)
+ if not os.path.exists(log_dir):
+ os.makedirs(log_dir)
+ formatter = logging.Formatter(LOG_FORMAT)
+ handler = logging.FileHandler(log_file)
+ handler.setFormatter(formatter)
+ log.addHandler(handler)
+
+
+def archive_build_conf(out_dir):
+ """Archive build/conf to test results"""
+ src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
+ tgt_dir = os.path.join(out_dir, 'build', 'conf')
+ os.makedirs(os.path.dirname(tgt_dir))
+ shutil.copytree(src_dir, tgt_dir)
+
+
+def update_globalres_file(result_obj, filename, metadata):
+ """Write results to globalres csv file"""
+ # Map test names to time and size columns in globalres
+ # The tuples represent index and length of times and sizes
+ # respectively
+ gr_map = {'test1': ((0, 1), (8, 1)),
+ 'test12': ((1, 1), (None, None)),
+ 'test13': ((2, 1), (9, 1)),
+ 'test2': ((3, 1), (None, None)),
+ 'test3': ((4, 3), (None, None)),
+ 'test4': ((7, 1), (10, 2))}
+
+ values = ['0'] * 12
+ for status, test, _ in result_obj.all_results():
+ if status in ['ERROR', 'SKIPPED']:
+ continue
+ (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
+ if t_ind is not None:
+ values[t_ind:t_ind + t_len] = test.times
+ if s_ind is not None:
+ values[s_ind:s_ind + s_len] = test.sizes
+
+ log.debug("Writing globalres log to %s", filename)
+ rev_info = metadata['layers']['meta']
+ with open(filename, 'a') as fobj:
+ fobj.write('{},{}:{},{},'.format(metadata['hostname'],
+ rev_info['branch'],
+ rev_info['commit'],
+ rev_info['commit']))
+ fobj.write(','.join(values) + '\n')
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+ parser.add_argument('-D', '--debug', action='store_true',
+ help='Enable debug level logging')
+ parser.add_argument('--globalres-file',
+ type=os.path.abspath,
+ help="Append results to 'globalres' csv file")
+ parser.add_argument('--lock-file', default='./oe-build-perf.lock',
+ metavar='FILENAME', type=os.path.abspath,
+ help="Lock file to use")
+ parser.add_argument('-o', '--out-dir', default='results-{date}',
+ type=os.path.abspath,
+ help="Output directory for test results")
+ parser.add_argument('-x', '--xml', action='store_true',
+ help='Enable JUnit xml output')
+ parser.add_argument('--log-file',
+ default='{out_dir}/oe-build-perf-test.log',
+ help="Log file of this script")
+ parser.add_argument('--run-tests', nargs='+', metavar='TEST',
+ help="List of tests to run")
+
+ return parser.parse_args(argv)
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+
+ # Set-up log file
+ out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
+ setup_file_logging(args.log_file.format(out_dir=out_dir))
+
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ lock_f = open(args.lock_file, 'w')
+ if not acquire_lock(lock_f):
+ log.error("Another instance of this script is running, exiting...")
+ return 1
+
+ if not pre_run_sanity_check():
+ return 1
+
+ # Check our capability to drop caches, asking for a password if needed
+ KernelDropCaches.check()
+
+ # Load build perf tests
+ loader = BuildPerfTestLoader()
+ if args.run_tests:
+ suite = loader.loadTestsFromNames(args.run_tests, oeqa.buildperf)
+ else:
+ suite = loader.loadTestsFromModule(oeqa.buildperf)
+
+ # Save test metadata
+ metadata = metadata_from_bb()
+ log.info("Testing Git revision branch:commit %s:%s (%s)",
+ metadata['layers']['meta']['branch'],
+ metadata['layers']['meta']['commit'],
+ metadata['layers']['meta']['commit_count'])
+ if args.xml:
+ write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
+ else:
+ with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
+ json.dump(metadata, fobj, indent=2)
+ archive_build_conf(out_dir)
+
+ runner = BuildPerfTestRunner(out_dir, verbosity=2)
+
+ # Suppress logger output to stderr so that the output from unittest
+ # is not mixed with occasional logger output
+ log.handlers[0].setLevel(logging.CRITICAL)
+
+ # Run actual tests
+ result = runner.run(suite)
+
+ # Restore logger output to stderr
+ log.handlers[0].setLevel(log.level)
+
+ if args.xml:
+ result.write_results_xml()
+ else:
+ result.write_results_json()
+ result.write_buildstats_json()
+ if args.globalres_file:
+ update_globalres_file(result, args.globalres_file, metadata)
+ if result.wasSuccessful():
+ return 0
+
+ return 2
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
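acquire_lock() above is the standard non-blocking flock pattern for enforcing a single running instance. The same pattern reduced to a self-contained sketch (lock path chosen arbitrarily):

    import errno
    import fcntl

    def try_lock(path):
        """Return the open lock file on success, None if already locked."""
        lock_f = open(path, 'w')
        try:
            fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as err:
            if err.errno == errno.EAGAIN:
                lock_f.close()
                return None  # another instance holds the lock
            raise
        return lock_f  # keep the file open for the process lifetime

    # lock = try_lock('/tmp/oe-build-perf.lock')
    # if lock is None: another instance of the script is running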
diff --git a/poky/scripts/oe-buildenv-internal b/poky/scripts/oe-buildenv-internal
new file mode 100755
index 000000000..677387232
--- /dev/null
+++ b/poky/scripts/oe-buildenv-internal
@@ -0,0 +1,140 @@
+#!/bin/sh
+
+# OE-Core Build Environment Setup Script
+#
+# Copyright (C) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+if ! $(return >/dev/null 2>&1) ; then
+ echo 'oe-buildenv-internal: error: this script must be sourced'
+ echo ''
+ echo 'Usage: . $OEROOT/scripts/oe-buildenv-internal &&'
+ echo ''
+ echo 'OpenEmbedded oe-buildenv-internal - an internal script that is'
+ echo 'used in oe-init-build-env to initialize the OE build environment'
+ echo ''
+ exit 2
+fi
+
+# It is assumed OEROOT is already defined when this is called
+if [ -z "$OEROOT" ]; then
+ echo >&2 "Error: OEROOT is not defined!"
+ return 1
+fi
+
+if [ -z "$OE_SKIP_SDK_CHECK" ] && [ -n "$OECORE_SDK_VERSION" ]; then
+ echo >&2 "Error: The OE SDK/ADT was detected as already being present in this shell environment. Please use a clean shell when sourcing this environment script."
+ return 1
+fi
+
+# Make sure we're not using python v3.x as 'python'; we don't support it.
+py_v2_check=$(/usr/bin/env python --version 2>&1 | grep "Python 3")
+if [ -n "$py_v2_check" ]; then
+ echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
+ echo >&2 "Please set up python v2 as your default 'python' interpreter."
+ return 1
+fi
+unset py_v2_check
+
+py_v27_check=$(python -c 'import sys; print sys.version_info >= (2,7,3)')
+if [ "$py_v27_check" != "True" ]; then
+ echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
+ echo >&2 "Please upgrade your python v2."
+fi
+unset py_v27_check
+
+# We potentially have code that doesn't parse correctly with older versions
+# of Python, and rather than fixing that and being eternally vigilant for
+# any other new feature use, just check the version here.
+py_v34_check=$(python3 -c 'import sys; print(sys.version_info >= (3,4,0))')
+if [ "$py_v34_check" != "True" ]; then
+ echo >&2 "BitBake requires Python 3.4.0 or later as 'python3'"
+ return 1
+fi
+unset py_v34_check
+
+if [ -z "$BDIR" ]; then
+ if [ -z "$1" ]; then
+ BDIR="build"
+ else
+ BDIR="$1"
+ if [ "$BDIR" = "/" ]; then
+ echo >&2 "Error: / is not supported as a build directory."
+ return 1
+ fi
+
+ # Remove any possible trailing slashes. This is used to work around
+ # buggy readlink in Ubuntu 10.04 that doesn't ignore trailing slashes
+ # and hence "readlink -f new_dir_to_be_created/" returns empty.
+ BDIR=$(echo $BDIR | sed -re 's|/+$||')
+
+ BDIR=$(readlink -f "$BDIR")
+ if [ -z "$BDIR" ]; then
+ PARENTDIR=$(dirname "$1")
+ echo >&2 "Error: the directory $PARENTDIR does not exist?"
+ return 1
+ fi
+ fi
+ if [ -n "$2" ]; then
+ BITBAKEDIR="$2"
+ fi
+fi
+if [ "${BDIR#/}" != "$BDIR" ]; then
+ BUILDDIR="$BDIR"
+else
+ BUILDDIR="$(pwd)/$BDIR"
+fi
+unset BDIR
+
+if [ -z "$BITBAKEDIR" ]; then
+ BITBAKEDIR="$OEROOT/bitbake$BBEXTRA"
+ test -d "$BITBAKEDIR" || BITBAKEDIR="$OEROOT/../bitbake$BBEXTRA"
+fi
+
+BITBAKEDIR=$(readlink -f "$BITBAKEDIR")
+BUILDDIR=$(readlink -f "$BUILDDIR")
+BBPATH=$BUILDDIR
+
+export BBPATH
+
+if [ ! -d "$BITBAKEDIR" ]; then
+ echo >&2 "Error: The bitbake directory ($BITBAKEDIR) does not exist! Please ensure a copy of bitbake exists at this location or specify an alternative path on the command line"
+ return 1
+fi
+
+# Make sure our paths are at the beginning of $PATH
+for newpath in "$BITBAKEDIR/bin" "$OEROOT/scripts"; do
+ # Remove any existing occurrences of $newpath from $PATH
+ PATH=$(echo $PATH | sed -re "s#(^|:)$newpath(:|$)#\2#g;s#^:##")
+
+ # Add $newpath to $PATH
+ PATH="$newpath:$PATH"
+done
+unset BITBAKEDIR newpath
+
+# Used by the runqemu script
+export BUILDDIR
+export PATH
+
+BB_ENV_EXTRAWHITE_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
+HTTPS_PROXY https_proxy FTP_PROXY ftp_proxy FTPS_PROXY ftps_proxy ALL_PROXY \
+all_proxy NO_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY \
+SDKMACHINE BB_NUMBER_THREADS BB_NO_NETWORK PARALLEL_MAKE GIT_PROXY_COMMAND \
+SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE"
+
+BB_ENV_EXTRAWHITE="$(echo $BB_ENV_EXTRAWHITE $BB_ENV_EXTRAWHITE_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
+
+export BB_ENV_EXTRAWHITE
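The PATH loop above strips any existing occurrence of each directory before prepending it, so sourcing the script repeatedly never grows $PATH. The same dedup-then-prepend logic expressed in Python for clarity:

    def prepend_path(path, newpath):
        """Remove existing occurrences of newpath, then prepend it."""
        parts = [p for p in path.split(':') if p and p != newpath]
        return ':'.join([newpath] + parts)

    print(prepend_path('/usr/bin:/opt/bb/bin:/bin', '/opt/bb/bin'))
    # -> /opt/bb/bin:/usr/bin:/bin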
diff --git a/poky/scripts/oe-check-sstate b/poky/scripts/oe-check-sstate
new file mode 100755
index 000000000..d06efe436
--- /dev/null
+++ b/poky/scripts/oe-check-sstate
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+
+# Query which tasks will be restored from sstate
+#
+# Copyright 2016 Intel Corporation
+# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import subprocess
+import tempfile
+import shutil
+import re
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+import argparse_oe
+
+
+def translate_virtualfns(tasks):
+ import bb.tinfoil
+ tinfoil = bb.tinfoil.Tinfoil()
+ try:
+ tinfoil.prepare(False)
+
+ recipecaches = tinfoil.cooker.recipecaches
+ outtasks = []
+ for task in tasks:
+ (mc, fn, taskname) = bb.runqueue.split_tid(task)
+ if taskname.endswith('_setscene'):
+ taskname = taskname[:-9]
+ outtasks.append('%s:%s' % (recipecaches[mc].pkg_fn[fn], taskname))
+ finally:
+ tinfoil.shutdown()
+ return outtasks
+
+
+def check(args):
+ tmpdir = tempfile.mkdtemp(prefix='oe-check-sstate-')
+ try:
+ env = os.environ.copy()
+ if not args.same_tmpdir:
+ env['BB_ENV_EXTRAWHITE'] = env.get('BB_ENV_EXTRAWHITE', '') + ' TMPDIR_forcevariable'
+ env['TMPDIR_forcevariable'] = tmpdir
+
+ try:
+ output = subprocess.check_output(
+ 'bitbake -n %s' % ' '.join(args.target),
+ stderr=subprocess.STDOUT,
+ env=env,
+ shell=True)
+
+ task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
+ tasks = []
+ for line in output.decode('utf-8').splitlines():
+ res = task_re.match(line)
+ if res:
+ tasks.append(res.group(1))
+ outtasks = translate_virtualfns(tasks)
+ except subprocess.CalledProcessError as e:
+ print('ERROR: bitbake failed:\n%s' % e.output.decode('utf-8'))
+ return e.returncode
+ finally:
+ shutil.rmtree(tmpdir)
+
+ if args.log:
+ with open(args.log, 'wb') as f:
+ f.write(output)
+
+ if args.outfile:
+ with open(args.outfile, 'w') as f:
+ for task in outtasks:
+ f.write('%s\n' % task)
+ else:
+ for task in outtasks:
+ print(task)
+
+ return 0
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description='OpenEmbedded sstate check tool. Does a dry-run to check restoring the specified targets from shared state, and lists the tasks that would be restored. Set BB_SETSCENE_ENFORCE=1 in the environment if you wish to ensure real tasks are disallowed.')
+
+ parser.add_argument('target', nargs='+', help='Target to check')
+ parser.add_argument('-o', '--outfile', help='Write list to a file instead of stdout')
+ parser.add_argument('-l', '--log', help='Write full log to a file')
+ parser.add_argument('-s', '--same-tmpdir', action='store_true', help='Use same TMPDIR for check (list will then be dependent on what tasks have executed previously)')
+
+ parser.set_defaults(func=check)
+
+ args = parser.parse_args()
+
+ ret = args.func(args)
+ return ret
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
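The heart of check() is scraping setscene task names out of a 'bitbake -n' dry run. The extraction step in isolation, fed fabricated log lines:

    import re

    task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')

    log = '\n'.join([
        'NOTE: Running setscene task 1 of 2 (/path/zlib.bb:do_populate_sysroot_setscene)',
        'NOTE: Running task 3 of 10 (/path/app.bb:do_compile)',
        'NOTE: Running setscene task 2 of 2 (/path/zlib.bb:do_package_qa_setscene)',
    ])

    tasks = [m.group(1) for m in map(task_re.match, log.splitlines()) if m]
    print(tasks)  # only the two setscene tasks survive the filter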
diff --git a/poky/scripts/oe-depends-dot b/poky/scripts/oe-depends-dot
new file mode 100755
index 000000000..5cec23bf0
--- /dev/null
+++ b/poky/scripts/oe-depends-dot
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2018 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import os
+import sys
+import argparse
+import logging
+import re
+
+class Dot(object):
+ def __init__(self):
+ parser = argparse.ArgumentParser(
+ description="Analyse recipe-depends.dot generated by bitbake -g",
+ epilog="Use %(prog)s --help to get help")
+ parser.add_argument("dotfile",
+ help = "Specify the dotfile", nargs = 1, action='store', default='')
+ parser.add_argument("-k", "--key",
+ help = "Specify the key, e.g., recipe name",
+ action="store", default='')
+ parser.add_argument("-d", "--depends",
+ help = "Print the key's dependencies",
+ action="store_true", default=False)
+ parser.add_argument("-w", "--why",
+ help = "Print why the key is built",
+ action="store_true", default=False)
+ parser.add_argument("-r", "--remove",
+ help = "Remove duplicated dependencies to reduce the size of the dot files."
+ " For example, A->B, B->C, A->C, then A->C can be removed.",
+ action="store_true", default=False)
+
+ self.args = parser.parse_args()
+
+ if len(sys.argv) != 3 and len(sys.argv) < 5:
+ print('ERROR: Not enough args, see --help for usage')
+ sys.exit(1)
+
+ def main(self):
+ #print(self.args.dotfile[0])
+ # The format is {key: depends}
+ depends = {}
+ with open(self.args.dotfile[0], 'r') as f:
+ for line in f.readlines():
+ if ' -> ' not in line:
+ continue
+ line_no_quotes = line.replace('"', '')
+ m = re.match("(.*) -> (.*)", line_no_quotes)
+ if not m:
+ print('WARNING: Found unexpected line: %s' % line)
+ continue
+ key = m.group(1)
+ if key == "meta-world-pkgdata":
+ continue
+ dep = m.group(2)
+ if key in depends:
+ if dep not in depends[key]:
+ depends[key].add(dep)
+ else:
+ print('WARNING: Found duplicated line: %s' % line)
+ else:
+ depends[key] = set()
+ depends[key].add(dep)
+
+ if self.args.remove:
+ reduced_depends = {}
+ for k, deps in depends.items():
+ child_deps = set()
+ # Both direct and indirect depends are already in the dict, so
+ # we don't have to do this recursively.
+ for dep in deps:
+ if dep in depends:
+ child_deps |= depends[dep]
+
+ reduced_depends[k] = deps - child_deps
+ outfile = '%s-reduced%s' % (self.args.dotfile[0][:-4], self.args.dotfile[0][-4:])
+ with open(outfile, 'w') as f:
+ print('Saving reduced dot file to %s' % outfile)
+ f.write('digraph depends {\n')
+ for k, v in reduced_depends.items():
+ for dep in v:
+ f.write('"%s" -> "%s"\n' % (k, dep))
+ f.write('}\n')
+ sys.exit(0)
+
+ if self.args.key not in depends:
+ print("ERROR: Can't find key %s in %s" % (self.args.key, self.args.dotfile[0]))
+ sys.exit(1)
+
+ if self.args.depends:
+ if self.args.key in depends:
+ print('Depends: %s' % ' '.join(depends[self.args.key]))
+
+ reverse_deps = []
+ if self.args.why:
+ for k, v in depends.items():
+ if self.args.key in v and not k in reverse_deps:
+ reverse_deps.append(k)
+ print('Because: %s' % ' '.join(reverse_deps))
+
+if __name__ == "__main__":
+ try:
+ dot = Dot()
+ ret = dot.main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
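The --remove pass exploits the fact that bitbake's dot output already lists indirect dependencies explicitly, so subtracting each node's children's edges in one pass is enough; no recursion is needed. On a toy graph:

    depends = {'A': {'B', 'C'}, 'B': {'C'}}

    reduced = {}
    for node, deps in depends.items():
        child_deps = set()
        for dep in deps:
            child_deps |= depends.get(dep, set())
        # A -> C is implied by A -> B -> C, so it is dropped.
        reduced[node] = deps - child_deps

    print(reduced)  # {'A': {'B'}, 'B': {'C'}}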
diff --git a/poky/scripts/oe-find-native-sysroot b/poky/scripts/oe-find-native-sysroot
new file mode 100755
index 000000000..350ea2137
--- /dev/null
+++ b/poky/scripts/oe-find-native-sysroot
@@ -0,0 +1,115 @@
+#!/bin/bash
+#
+# Find a native sysroot to use - either from an in-tree OE build or
+# from a toolchain installation. It then ensures the variable
+# $OECORE_NATIVE_SYSROOT is set to the sysroot's base directory, and sets
+# $PSEUDO to the path of the pseudo binary.
+#
+# This script is intended to be run within other scripts by source'ing
+# it, e.g.:
+#
+# SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot`
+# . $SYSROOT_SETUP_SCRIPT <recipe>
+#
+# On failure this script will terminate execution of your calling program
+# unless you set the variable $SKIP_STRICT_SYSROOT_CHECK to a non-empty
+# string beforehand.
+#
+# Copyright (c) 2010 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
+ echo 'Usage: oe-find-native-sysroot <recipe> [-h|--help]'
+ echo ''
+ echo 'OpenEmbedded find-native-sysroot - helper script to set'
+ echo 'environment variables OECORE_NATIVE_SYSROOT and PSEUDO'
+ echo 'to the path of the native sysroot directory and pseudo'
+ echo 'executable binary'
+ echo ''
+ echo 'options:'
+ echo ' recipe its STAGING_DIR_NATIVE is used as native sysroot'
+ echo ' -h, --help show this help message and exit'
+ echo ''
+ exit 2
+fi
+
+# Global vars
+BITBAKE_E=""
+set_oe_native_sysroot(){
+ echo "Running bitbake -e $1"
+ BITBAKE_E="`bitbake -e $1`"
+ OECORE_NATIVE_SYSROOT=`echo "$BITBAKE_E" | grep ^STAGING_DIR_NATIVE= | cut -d '"' -f2`
+
+ if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
+ # This indicates that there was an error running bitbake -e that
+ # the user needs to be informed of
+ echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
+ echo "Here is the output from bitbake -e $1"
+ echo "$BITBAKE_E"
+ exit 1
+ fi
+}
+
+if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
+ BITBAKE=`which bitbake 2> /dev/null`
+ if [ "x$BITBAKE" != "x" ]; then
+ if [ "$UID" = "0" ]; then
+ # Root cannot run bitbake unless sanity checking is disabled
+ if [ ! -d "./conf" ]; then
+ echo "Error: root cannot run bitbake by default, and I cannot find a ./conf directory to be able to disable sanity checking"
+ exit 1
+ fi
+ touch conf/sanity.conf
+ set_oe_native_sysroot $1
+ rm -f conf/sanity.conf
+ else
+ set_oe_native_sysroot $1
+ fi
+ else
+ echo "Error: Unable to locate bitbake command."
+ echo "Did you forget to source the build environment setup script?"
+
+ if [ -z "$SKIP_STRICT_SYSROOT_CHECK" ]; then
+ exit 1
+ fi
+ fi
+fi
+
+if [ ! -e "$OECORE_NATIVE_SYSROOT/" ]; then
+ echo "Error: $OECORE_NATIVE_SYSROOT doesn't exist."
+
+ if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
+ if [[ $1 =~ .*native.* ]]; then
+ echo "Have you run 'bitbake $1 -caddto_recipe_sysroot'?"
+ else
+ echo "Have you run 'bitbake $1 '?"
+ fi
+ else
+ echo "This shouldn't happen - something is wrong with your toolchain installation"
+ fi
+
+ if [ -z "$SKIP_STRICT_SYSROOT_CHECK" ]; then
+ exit 1
+ fi
+fi
+
+# Set up pseudo command
+pseudo="$OECORE_NATIVE_SYSROOT/usr/bin/pseudo"
+if [ -e "$pseudo" ]; then
+ echo "PSEUDO=$pseudo"
+ PSEUDO="$pseudo"
+else
+ echo "PSEUDO $pseudo is not found."
+fi
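set_oe_native_sysroot() recovers the sysroot by grepping STAGING_DIR_NATIVE out of 'bitbake -e' output. The same extraction in Python, run here on a canned sample line rather than a live build:

    def staging_dir_native(bitbake_e_output):
        """Pull the value of STAGING_DIR_NATIVE="..." from bitbake -e output."""
        for line in bitbake_e_output.splitlines():
            if line.startswith('STAGING_DIR_NATIVE='):
                return line.split('"')[1]
        return None

    sample = 'STAGING_DIR_NATIVE="/build/tmp/work/x86_64/recipe-sysroot-native"'
    print(staging_dir_native(sample))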
diff --git a/poky/scripts/oe-git-archive b/poky/scripts/oe-git-archive
new file mode 100755
index 000000000..ab19cb9aa
--- /dev/null
+++ b/poky/scripts/oe-git-archive
@@ -0,0 +1,271 @@
+#!/usr/bin/python3
+#
+# Helper script for committing data to git and pushing upstream
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import argparse
+import glob
+import json
+import logging
+import math
+import os
+import re
+import sys
+from collections import namedtuple, OrderedDict
+from datetime import datetime, timedelta, tzinfo
+from operator import attrgetter
+
+# Import oe and bitbake libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+
+from oeqa.utils.git import GitRepo, GitError
+from oeqa.utils.metadata import metadata_from_bb
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger()
+
+
+class ArchiveError(Exception):
+ """Internal error handling of this script"""
+
+
+def format_str(string, fields):
+ """Format string using the given fields (dict)"""
+ try:
+ return string.format(**fields)
+ except KeyError as err:
+ raise ArchiveError("Unable to expand string '{}': unknown field {} "
+ "(valid fields are: {})".format(
+ string, err, ', '.join(sorted(fields.keys()))))
+
+
+def init_git_repo(path, no_create, bare):
+ """Initialize local Git repository"""
+ path = os.path.abspath(path)
+ if os.path.isfile(path):
+ raise ArchiveError("Invalid Git repo at {}: path exists but is not a "
+ "directory".format(path))
+ if not os.path.isdir(path) or not os.listdir(path):
+ if no_create:
+ raise ArchiveError("No git repo at {}, refusing to create "
+ "one".format(path))
+ if not os.path.isdir(path):
+ try:
+ os.mkdir(path)
+ except (FileNotFoundError, PermissionError) as err:
+ raise ArchiveError("Failed to mkdir {}: {}".format(path, err))
+ if not os.listdir(path):
+ log.info("Initializing a new Git repo at %s", path)
+ repo = GitRepo.init(path, bare)
+ try:
+ repo = GitRepo(path, is_topdir=True)
+ except GitError:
+ raise ArchiveError("Non-empty directory that is not a Git repository "
+ "at {}\nPlease specify an existing Git repository, "
+ "an empty directory or a non-existing directory "
+ "path.".format(path))
+ return repo
+
+
+def git_commit_data(repo, data_dir, branch, message, exclude, notes):
+ """Commit data into a Git repository"""
+ log.info("Committing data into to branch %s", branch)
+ tmp_index = os.path.join(repo.git_dir, 'index.oe-git-archive')
+ try:
+ # Create new tree object from the data
+ env_update = {'GIT_INDEX_FILE': tmp_index,
+ 'GIT_WORK_TREE': os.path.abspath(data_dir)}
+ repo.run_cmd('add .', env_update)
+
+ # Remove files that are excluded
+ if exclude:
+ repo.run_cmd(['rm', '--cached'] + list(exclude), env_update)
+
+ tree = repo.run_cmd('write-tree', env_update)
+
+ # Create new commit object from the tree
+ parent = repo.rev_parse(branch)
+ git_cmd = ['commit-tree', tree, '-m', message]
+ if parent:
+ git_cmd += ['-p', parent]
+ commit = repo.run_cmd(git_cmd, env_update)
+
+ # Create git notes
+ for ref, filename in notes:
+ ref = ref.format(branch_name=branch)
+ repo.run_cmd(['notes', '--ref', ref, 'add',
+ '-F', os.path.abspath(filename), commit])
+
+ # Update branch head
+ git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
+ if parent:
+ git_cmd.append(parent)
+ repo.run_cmd(git_cmd)
+
+ # Update current HEAD, if we're on branch 'branch'
+ if not repo.bare and repo.get_current_branch() == branch:
+ log.info("Updating %s HEAD to latest commit", repo.top_dir)
+ repo.run_cmd('reset --hard')
+
+ return commit
+ finally:
+ if os.path.exists(tmp_index):
+ os.unlink(tmp_index)
+
+
+def expand_tag_strings(repo, name_pattern, msg_subj_pattern, msg_body_pattern,
+ keywords):
+ """Generate tag name and message, with support for running id number"""
+ keyws = keywords.copy()
+ # Tag number is handled specially: if not defined, we autoincrement it
+ if 'tag_number' not in keyws:
+ # Fill in all other fields than 'tag_number'
+ keyws['tag_number'] = '{tag_number}'
+ tag_re = format_str(name_pattern, keyws)
+ # Replace parentheses for proper regex matching
+ tag_re = tag_re.replace('(', r'\(').replace(')', r'\)') + '$'
+ # Inject regex group pattern for 'tag_number'
+ tag_re = tag_re.format(tag_number='(?P<tag_number>[0-9]{1,5})')
+
+ keyws['tag_number'] = 0
+ for existing_tag in repo.run_cmd('tag').splitlines():
+ match = re.match(tag_re, existing_tag)
+
+ if match and int(match.group('tag_number')) >= keyws['tag_number']:
+ keyws['tag_number'] = int(match.group('tag_number')) + 1
+
+ tag_name = format_str(name_pattern, keyws)
+ msg_subj = format_str(msg_subj_pattern.strip(), keyws)
+ msg_body = format_str(msg_body_pattern, keyws)
+ return tag_name, msg_subj + '\n\n' + msg_body
+
+
+def parse_args(argv):
+ """Parse command line arguments"""
+ parser = argparse.ArgumentParser(
+ description="Commit data to git and push upstream",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+ parser.add_argument('--debug', '-D', action='store_true',
+ help="Verbose logging")
+ parser.add_argument('--git-dir', '-g', required=True,
+ help="Local git directory to use")
+ parser.add_argument('--no-create', action='store_true',
+ help="If GIT_DIR is not a valid Git repository, do not "
+ "try to create one")
+ parser.add_argument('--bare', action='store_true',
+ help="Initialize a bare repository when creating a "
+ "new one")
+ parser.add_argument('--push', '-p', nargs='?', default=False, const=True,
+ help="Push to remote")
+ parser.add_argument('--branch-name', '-b',
+ default='{hostname}/{branch}/{machine}',
+ help="Git branch name (pattern) to use")
+ parser.add_argument('--no-tag', action='store_true',
+ help="Do not create Git tag")
+ parser.add_argument('--tag-name', '-t',
+ default='{hostname}/{branch}/{machine}/{commit_count}-g{commit}/{tag_number}',
+ help="Tag name (pattern) to use")
+ parser.add_argument('--commit-msg-subject',
+ default='Results of {branch}:{commit} on {hostname}',
+ help="Subject line (pattern) to use in the commit message")
+ parser.add_argument('--commit-msg-body',
+ default='branch: {branch}\ncommit: {commit}\nhostname: {hostname}',
+ help="Commit message body (pattern)")
+ parser.add_argument('--tag-msg-subject',
+ default='Test run #{tag_number} of {branch}:{commit} on {hostname}',
+ help="Subject line (pattern) of the tag message")
+ parser.add_argument('--tag-msg-body',
+ default='',
+ help="Tag message body (pattern)")
+ parser.add_argument('--exclude', action='append', default=[],
+ help="Glob to exclude files from the commit. Relative "
+ "to DATA_DIR. May be specified multiple times")
+ parser.add_argument('--notes', nargs=2, action='append', default=[],
+ metavar=('GIT_REF', 'FILE'),
+ help="Add a file as a note under refs/notes/GIT_REF. "
+ "{branch_name} in GIT_REF will be expanded to the "
+ "actual target branch name (specified by "
+ "--branch-name). This option may be specified "
+ "multiple times.")
+ parser.add_argument('data_dir', metavar='DATA_DIR',
+ help="Data to commit")
+ return parser.parse_args(argv)
+
+
+def main(argv=None):
+ """Script entry point"""
+ args = parse_args(argv)
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ try:
+ if not os.path.isdir(args.data_dir):
+ raise ArchiveError("Not a directory: {}".format(args.data_dir))
+
+ data_repo = init_git_repo(args.git_dir, args.no_create, args.bare)
+
+ # Get keywords to be used in tag and branch names and messages
+ metadata = metadata_from_bb()
+ keywords = {'hostname': metadata['hostname'],
+ 'branch': metadata['layers']['meta']['branch'],
+ 'commit': metadata['layers']['meta']['commit'],
+ 'commit_count': metadata['layers']['meta']['commit_count'],
+ 'machine': metadata['config']['MACHINE']}
+
+ # Expand strings early in order to avoid getting into inconsistent
+ # state (e.g. no tag even if data was committed)
+ commit_msg = format_str(args.commit_msg_subject.strip(), keywords)
+ commit_msg += '\n\n' + format_str(args.commit_msg_body, keywords)
+ branch_name = format_str(args.branch_name, keywords)
+ tag_name = None
+ if not args.no_tag and args.tag_name:
+ tag_name, tag_msg = expand_tag_strings(data_repo, args.tag_name,
+ args.tag_msg_subject,
+ args.tag_msg_body, keywords)
+
+ # Commit data
+ commit = git_commit_data(data_repo, args.data_dir, branch_name,
+ commit_msg, args.exclude, args.notes)
+
+ # Create tag
+ if tag_name:
+ log.info("Creating tag %s", tag_name)
+ data_repo.run_cmd(['tag', '-a', '-m', tag_msg, tag_name, commit])
+
+ # Push data to remote
+ if args.push:
+ cmd = ['push', '--tags']
+ # If no remote is given we push with the default settings from
+ # gitconfig
+ if args.push is not True:
+ notes_refs = ['refs/notes/' + ref.format(branch_name=branch_name)
+ for ref, _ in args.notes]
+ cmd.extend([args.push, branch_name] + notes_refs)
+ log.info("Pushing data to remote")
+ data_repo.run_cmd(cmd)
+
+ except ArchiveError as err:
+ log.error(str(err))
+ return 1
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
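expand_tag_strings() autoincrements {tag_number} by turning the tag-name pattern itself into a regex and scanning the existing tags. The scan reduced to a sketch (tag list made up):

    import re

    name_pattern = 'host1/master/{tag_number}'
    existing = ['host1/master/0', 'host1/master/1', 'host1/master/7']

    tag_re = re.compile(
        name_pattern.format(tag_number=r'(?P<tag_number>[0-9]{1,5})') + '$')

    next_num = 0
    for tag in existing:
        m = tag_re.match(tag)
        if m and int(m.group('tag_number')) >= next_num:
            next_num = int(m.group('tag_number')) + 1

    print(next_num)  # -> 8, one past the highest existing run number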
diff --git a/poky/scripts/oe-git-proxy b/poky/scripts/oe-git-proxy
new file mode 100755
index 000000000..7a43fe6a6
--- /dev/null
+++ b/poky/scripts/oe-git-proxy
@@ -0,0 +1,181 @@
+#!/bin/bash
+
+# oe-git-proxy is a simple tool to be used via GIT_PROXY_COMMAND. It uses socat
+# to make SOCKS5 or HTTPS proxy connections.
+# It uses ALL_PROXY or all_proxy or http_proxy to determine the proxy server,
+# protocol, and port.
+# It uses NO_PROXY to skip using the proxy for a comma delimited list of
+# hosts, host globs (*.example.com), IPs, or CIDR masks (192.168.1.0/24). It
+# is known to work with both bash and dash shells.
+#
+# Example ALL_PROXY values:
+# ALL_PROXY=socks://socks.example.com:1080
+# ALL_PROXY=https://proxy.example.com:8080
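+#
+# Typical use (a sketch; hosts and paths are illustrative):
+#   export ALL_PROXY=socks://socks.example.com:1080
+#   export GIT_PROXY_COMMAND=/path/to/oe-git-proxy
+#   git clone git://git.yoctoproject.org/poky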
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# AUTHORS
+# Darren Hart <dvhart@linux.intel.com>
+
+if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
+ echo 'oe-git-proxy: error: the following arguments are required: host port'
+ echo 'Usage: oe-git-proxy host port'
+ echo ''
+ echo 'OpenEmbedded git-proxy - a simple tool to be used via GIT_PROXY_COMMAND.'
+ echo 'It uses socat to make SOCKS or HTTPS proxy connections.'
+ echo 'It uses ALL_PROXY to determine the proxy server, protocol, and port.'
+ echo 'It uses NO_PROXY to skip using the proxy for a comma-delimited list'
+ echo 'of hosts, host globs (*.example.com), IPs, or CIDR masks (192.168.1.0/24).'
+ echo 'It is known to work with both bash and dash shells.'
+ echo ''
+ echo 'arguments:'
+ echo ' host proxy host to use'
+ echo ' port proxy port to use'
+ echo ''
+ echo 'options:'
+ echo ' -h, --help show this help message and exit'
+ echo ''
+ exit 2
+fi
+
+# Locate the socat binary
+SOCAT=$(which socat 2>/dev/null)
+if [ $? -ne 0 ]; then
+ echo "ERROR: socat binary not in PATH" 1>&2
+ exit 1
+fi
+METHOD=""
+
+# Test for a valid IPv4 dotted quad with an optional CIDR mask
+valid_ipv4() {
+ echo $1 | egrep -q "^([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])){3}(/(3[0-2]|[1-2]?[0-9]))?$"
+ return $?
+}
+
+# Convert an IPv4 address into a 32-bit integer
+ipv4_val() {
+ IP="$1"
+ SHIFT=24
+ VAL=0
+ for B in ${IP//./ }; do
+ VAL=$(($VAL+$(($B<<$SHIFT))))
+ SHIFT=$(($SHIFT-8))
+ done
+ echo "$VAL"
+}
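+# e.g. ipv4_val 10.0.0.1 -> 167772161 (10*2^24 + 0 + 0 + 1)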
+
+# Determine if two IPs are equivalent, or if the CIDR contains the IP
+match_ipv4() {
+ CIDR=$1
+ IP=$2
+
+ if [ -z "${IP%%$CIDR}" ]; then
+ return 0
+ fi
+
+ # Determine the mask bitlength
+ BITS=${CIDR##*/}
+ [ "$BITS" != "$CIDR" ] || BITS=32
+ if [ -z "$BITS" ]; then
+ return 1
+ fi
+
+ IPVAL=$(ipv4_val $IP)
+ IP2VAL=$(ipv4_val ${CIDR%%/*})
+
+ # OR the unmasked host bits (0..31-BITS) into both values so that only
+ # the network part is compared
+ for i in $(seq 0 $((31-$BITS))); do
+ IP2VAL=$(($IP2VAL|$((1<<$i))))
+ IPVAL=$(($IPVAL|$((1<<$i))))
+ done
+
+ if [ $IPVAL -eq $IP2VAL ]; then
+ return 0
+ fi
+ return 1
+}
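+# e.g. "match_ipv4 192.168.1.0/24 192.168.1.42" ORs the 8 unmasked host bits
+# into both values, making them equal, so it returns 0 (a match)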
+
+# Test to see if GLOB matches HOST
+match_host() {
+ HOST=$1
+ GLOB=$2
+
+ if [ -z "${HOST%%$GLOB}" ]; then
+ return 0
+ fi
+
+ # Match by netmask
+ if valid_ipv4 $GLOB; then
+ for HOST_IP in $(getent ahostsv4 $HOST | grep ' STREAM ' | cut -d ' ' -f 1) ; do
+ if valid_ipv4 $HOST_IP; then
+ match_ipv4 $GLOB $HOST_IP
+ if [ $? -eq 0 ]; then
+ return 0
+ fi
+ fi
+ done
+ fi
+
+ return 1
+}
+
+# If no proxy is set or needed, just connect directly
+METHOD="TCP:$1:$2"
+
+[ -z "${ALL_PROXY}" ] && ALL_PROXY=$all_proxy
+[ -z "${ALL_PROXY}" ] && ALL_PROXY=$http_proxy
+
+if [ -z "$ALL_PROXY" ]; then
+ exec $SOCAT STDIO $METHOD
+fi
+
+# Connect directly to hosts in NO_PROXY
+for H in ${NO_PROXY//,/ }; do
+ if match_host $1 $H; then
+ exec $SOCAT STDIO $METHOD
+ fi
+done
+
+# Proxy is necessary, determine protocol, server, and port
+# extract protocol
+PROTO=${ALL_PROXY%://*}
+# strip protocol:// from string
+ALL_PROXY=${ALL_PROXY#*://}
+# extract host & port parts:
+# 1) drop username/password
+PROXY=${ALL_PROXY##*@}
+# 2) remove optional trailing /?
+PROXY=${PROXY%%/*}
+# 3) extract optional port
+PORT=${PROXY##*:}
+if [ "$PORT" = "$PROXY" ]; then
+ PORT=""
+fi
+# 4) remove port
+PROXY=${PROXY%%:*}
+
+# extract username & password
+PROXYAUTH="${ALL_PROXY%@*}"
+[ "$PROXYAUTH" = "$ALL_PROXY" ] && PROXYAUTH=
+[ -n "${PROXYAUTH}" ] && PROXYAUTH=",proxyauth=${PROXYAUTH}"
+
+if [ "$PROTO" = "socks" ] || [ "$PROTO" = "socks4a" ]; then
+ if [ -z "$PORT" ]; then
+ PORT="1080"
+ fi
+ METHOD="SOCKS4A:$PROXY:$1:$2,socksport=$PORT"
+elif [ "$PROTO" = "socks4" ]; then
+ if [ -z "$PORT" ]; then
+ PORT="1080"
+ fi
+ METHOD="SOCKS4:$PROXY:$1:$2,socksport=$PORT"
+else
+ # Assume PROXY (http, https, etc)
+ if [ -z "$PORT" ]; then
+ PORT="8080"
+ fi
+ METHOD="PROXY:$PROXY:$1:$2,proxyport=${PORT}${PROXYAUTH}"
+fi
+
+exec $SOCAT STDIO "$METHOD"
diff --git a/poky/scripts/oe-gnome-terminal-phonehome b/poky/scripts/oe-gnome-terminal-phonehome
new file mode 100755
index 000000000..e02354883
--- /dev/null
+++ b/poky/scripts/oe-gnome-terminal-phonehome
@@ -0,0 +1,10 @@
+#!/bin/sh
+#
+# Gnome terminal won't tell us which PID a given command runs as, nor allow
+# a single instance, so we can't tell when the command completes. This script
+# records the PID of the target so we can tell when it's done.
+#
+echo $$ > $1
+shift
+exec $@
diff --git a/poky/scripts/oe-pkgdata-util b/poky/scripts/oe-pkgdata-util
new file mode 100755
index 000000000..aea8a5751
--- /dev/null
+++ b/poky/scripts/oe-pkgdata-util
@@ -0,0 +1,630 @@
+#!/usr/bin/env python3
+
+# OpenEmbedded pkgdata utility
+#
+# Written by: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# Copyright 2012-2015 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+import sys
+import os
+import os.path
+import fnmatch
+import re
+import argparse
+import logging
+from collections import defaultdict, OrderedDict
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import argparse_oe
+logger = scriptutils.logger_create('pkgdatautil')
+
+def tinfoil_init():
+ import bb.tinfoil
+ import logging
+ tinfoil = bb.tinfoil.Tinfoil()
+ tinfoil.logger.setLevel(logging.WARNING)
+ tinfoil.prepare(True)
+ return tinfoil
+
+
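+# Expand glob expressions (e.g. "*-dev") over the packages in pkglistfile,
+# mapping each listed package to its corresponding counterpart where one was
+# actually built and packaged.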
+def glob(args):
+ # Handle both multiple arguments and multiple values within an arg (old syntax)
+ globs = []
+ for globitem in args.glob:
+ globs.extend(globitem.split())
+
+ if not os.path.exists(args.pkglistfile):
+ logger.error('Unable to find package list file %s' % args.pkglistfile)
+ sys.exit(1)
+
+ skipval = "-locale-|^locale-base-|-dev$|-doc$|-dbg$|-staticdev$|^kernel-module-"
+ if args.exclude:
+ skipval += "|" + args.exclude
+ skipregex = re.compile(skipval)
+
+ skippedpkgs = set()
+ mappedpkgs = set()
+ with open(args.pkglistfile, 'r') as f:
+ for line in f:
+ fields = line.rstrip().split()
+ if not fields:
+ continue
+ pkg = fields[0]
+ # We don't care about other args (used to need the package architecture but the
+ # new pkgdata structure avoids the need for that)
+
+ # Skip packages for which there is no point applying globs
+ if skipregex.search(pkg):
+ logger.debug("%s -> !!" % pkg)
+ skippedpkgs.add(pkg)
+ continue
+
+ # Skip packages that already match the globs, so if e.g. a dev package
+ # is already installed and thus in the list, we don't process it any further
+ # Most of these will be caught by skipregex already, but just in case...
+ already = False
+ for g in globs:
+ if fnmatch.fnmatchcase(pkg, g):
+ already = True
+ break
+ if already:
+ skippedpkgs.add(pkg)
+ logger.debug("%s -> !" % pkg)
+ continue
+
+ # Define some functions
+ def revpkgdata(pkgn):
+ return os.path.join(args.pkgdata_dir, "runtime-reverse", pkgn)
+ def fwdpkgdata(pkgn):
+ return os.path.join(args.pkgdata_dir, "runtime", pkgn)
+ def readpn(pkgdata_file):
+ pn = ""
+ with open(pkgdata_file, 'r') as f:
+ for line in f:
+ if line.startswith("PN:"):
+ pn = line.split(': ')[1].rstrip()
+ return pn
+ def readrenamed(pkgdata_file):
+ renamed = ""
+ pn = os.path.basename(pkgdata_file)
+ with open(pkgdata_file, 'r') as f:
+ for line in f:
+ if line.startswith("PKG_%s:" % pn):
+ renamed = line.split(': ')[1].rstrip()
+ return renamed
+
+ # Main processing loop
+ for g in globs:
+ mappedpkg = ""
+ # First just try substitution (i.e. packagename -> packagename-dev)
+ newpkg = g.replace("*", pkg)
+ revlink = revpkgdata(newpkg)
+ if os.path.exists(revlink):
+ mappedpkg = os.path.basename(os.readlink(revlink))
+ fwdfile = fwdpkgdata(mappedpkg)
+ if os.path.exists(fwdfile):
+ mappedpkg = readrenamed(fwdfile)
+ if not os.path.exists(fwdfile + ".packaged"):
+ mappedpkg = ""
+ else:
+ revlink = revpkgdata(pkg)
+ if os.path.exists(revlink):
+ # Check if we can map after undoing the package renaming (by resolving the symlink)
+ origpkg = os.path.basename(os.readlink(revlink))
+ newpkg = g.replace("*", origpkg)
+ fwdfile = fwdpkgdata(newpkg)
+ if os.path.exists(fwdfile):
+ mappedpkg = readrenamed(fwdfile)
+ else:
+ # That didn't work, so now get the PN, substitute that, then map in the other direction
+ pn = readpn(revlink)
+ newpkg = g.replace("*", pn)
+ fwdfile = fwdpkgdata(newpkg)
+ if os.path.exists(fwdfile):
+ mappedpkg = readrenamed(fwdfile)
+ if not os.path.exists(fwdfile + ".packaged"):
+ mappedpkg = ""
+ else:
+ # Package doesn't even exist...
+ logger.debug("%s is not a valid package!" % (pkg))
+ break
+
+ if mappedpkg:
+ logger.debug("%s (%s) -> %s" % (pkg, g, mappedpkg))
+ mappedpkgs.add(mappedpkg)
+ else:
+ logger.debug("%s (%s) -> ?" % (pkg, g))
+
+ logger.debug("------")
+
+ print("\n".join(mappedpkgs - skippedpkgs))
+
+def read_value(args):
+ # Handle both multiple arguments and multiple values within an arg (old syntax)
+ packages = []
+ if args.file:
+ with open(args.file, 'r') as f:
+ for line in f:
+ splitline = line.split()
+ if splitline:
+ packages.append(splitline[0])
+ else:
+ for pkgitem in args.pkg:
+ packages.extend(pkgitem.split())
+ if not packages:
+ logger.error("No packages specified")
+ sys.exit(1)
+
+ def readvar(pkgdata_file, valuename, mappedpkg):
+ val = ""
+ with open(pkgdata_file, 'r') as f:
+ for line in f:
+ if (line.startswith(valuename + ":") or
+ line.startswith(valuename + "_" + mappedpkg + ":")):
+ val = line.split(': ', 1)[1].rstrip()
+ return val
+
+ logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuename, packages))
+ for package in packages:
+ pkg_split = package.split('_')
+ pkg_name = pkg_split[0]
+ logger.debug("package: '%s'" % pkg_name)
+ revlink = os.path.join(args.pkgdata_dir, "runtime-reverse", pkg_name)
+ logger.debug(revlink)
+ if os.path.exists(revlink):
+ mappedpkg = os.path.basename(os.readlink(revlink))
+ qvar = args.valuename
+ value = readvar(revlink, qvar, mappedpkg)
+ if qvar == "PKGSIZE":
+ # PKGSIZE is now in bytes, but we want it in KB (rounded to the nearest KB)
+ pkgsize = (int(value) + 1024 // 2) // 1024
+ value = "%d" % pkgsize
+ if args.unescape:
+ import codecs
+ # escape_decode() unescapes backslash encodings in byte streams
+ value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
+ if args.prefix_name:
+ print('%s %s' % (pkg_name, value))
+ else:
+ print(value)
+ else:
+ logger.debug("revlink %s does not exist", revlink)
+
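+# Translate recipe-space package names to runtime names via the PKG_<name>
+# entries in the runtime pkgdata files, or, with reverse=True, runtime names
+# back to recipe-space names via the runtime-reverse symlinks.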
+def lookup_pkglist(pkgs, pkgdata_dir, reverse):
+ if reverse:
+ mappings = OrderedDict()
+ for pkg in pkgs:
+ revlink = os.path.join(pkgdata_dir, "runtime-reverse", pkg)
+ logger.debug(revlink)
+ if os.path.exists(revlink):
+ mappings[pkg] = os.path.basename(os.readlink(revlink))
+ else:
+ mappings = defaultdict(list)
+ for pkg in pkgs:
+ pkgfile = os.path.join(pkgdata_dir, 'runtime', pkg)
+ if os.path.exists(pkgfile):
+ with open(pkgfile, 'r') as f:
+ for line in f:
+ fields = line.rstrip().split(': ')
+ if fields[0] == 'PKG_%s' % pkg:
+ mappings[pkg].append(fields[1])
+ break
+ return mappings
+
+def lookup_pkg(args):
+ # Handle both multiple arguments and multiple values within an arg (old syntax)
+ pkgs = []
+ for pkgitem in args.pkg:
+ pkgs.extend(pkgitem.split())
+
+ mappings = lookup_pkglist(pkgs, args.pkgdata_dir, args.reverse)
+
+ if len(mappings) < len(pkgs):
+ missing = list(set(pkgs) - set(mappings.keys()))
+ logger.error("The following packages could not be found: %s" % ', '.join(missing))
+ sys.exit(1)
+
+ if args.reverse:
+ items = list(mappings.values())
+ else:
+ items = []
+ for pkg in pkgs:
+ items.extend(mappings.get(pkg, []))
+
+ print('\n'.join(items))
+
+def lookup_recipe(args):
+ def parse_pkgdatafile(pkgdatafile):
+ with open(pkgdatafile, 'r') as f:
+ found = False
+ for line in f:
+ if line.startswith('PN:'):
+ print("%s" % line.split(':', 1)[1].strip())
+ found = True
+ break
+ if not found:
+ logger.error("Unable to find PN entry in %s" % pkgdatafile)
+ sys.exit(1)
+
+ # Handle both multiple arguments and multiple values within an arg (old syntax)
+ pkgs = []
+ for pkgitem in args.pkg:
+ pkgs.extend(pkgitem.split())
+
+ for pkg in pkgs:
+ providepkgpath = os.path.join(args.pkgdata_dir, "runtime-rprovides", pkg)
+ if os.path.exists(providepkgpath):
+ for f in os.listdir(providepkgpath):
+ if f != pkg:
+ print("%s is in the RPROVIDES of %s:" % (pkg, f))
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", f)
+ parse_pkgdatafile(pkgdatafile)
+ break
+ pkgdatafile = os.path.join(args.pkgdata_dir, 'runtime-reverse', pkg)
+ if not os.path.exists(pkgdatafile):
+ logger.error("The following packages could not be found: %s" % pkg)
+ sys.exit(1)
+ parse_pkgdatafile(pkgdatafile)
+
+def package_info(args):
+ def parse_pkgdatafile(pkgdatafile):
+ with open(pkgdatafile, 'r') as f:
+ pkge = ''
+ pkgr = ''
+ pe = ''
+ pr = ''
+ for line in f:
+ if line.startswith('PKGV:'):
+ pkg_version = line.split(':', 1)[1].strip()
+ elif line.startswith('PKGE:'):
+ pkge = line.split(':', 1)[1].strip()
+ elif line.startswith('PKGR:'):
+ pkgr = line.split(':', 1)[1].strip()
+ elif line.startswith('PN:'):
+ recipe = line.split(':', 1)[1].strip()
+ elif line.startswith('PV:'):
+ recipe_version = line.split(':', 1)[1].strip()
+ elif line.startswith('PE:'):
+ pe = line.split(':', 1)[1].strip()
+ elif line.startswith('PR:'):
+ pr = line.split(':', 1)[1].strip()
+ elif line.startswith('PKGSIZE'):
+ pkg_size = line.split(':', 1)[1].strip()
+ if pkge:
+ pkg_version = pkge + ":" + pkg_version
+ if pkgr:
+ pkg_version = pkg_version + "-" + pkgr
+ if pe:
+ recipe_version = pe + ":" + recipe_version
+ if pr:
+ recipe_version = recipe_version + "-" + pr
+ print("%s %s %s %s %s" % (pkg, pkg_version, recipe, recipe_version, pkg_size))
+
+ # Handle both multiple arguments and multiple values within an arg (old syntax)
+ packages = []
+ if args.file:
+ with open(args.file, 'r') as f:
+ for line in f:
+ splitline = line.split()
+ if splitline:
+ packages.append(splitline[0])
+ else:
+ for pkgitem in args.pkg:
+ packages.extend(pkgitem.split())
+ if not packages:
+ logger.error("No packages specified")
+ sys.exit(1)
+
+ for pkg in packages:
+ providepkgpath = os.path.join(args.pkgdata_dir, "runtime-rprovides", pkg)
+ if os.path.exists(providepkgpath):
+ for f in os.listdir(providepkgpath):
+ if f != pkg:
+ print("%s is in the RPROVIDES of %s:" % (pkg, f))
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", f)
+ parse_pkgdatafile(pkgdatafile)
+ break
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime-reverse", pkg)
+ if not os.path.exists(pkgdatafile):
+ logger.error("Unable to find any built runtime package named %s" % pkg)
+ sys.exit(1)
+ parse_pkgdatafile(pkgdatafile)
+
+def get_recipe_pkgs(pkgdata_dir, recipe, unpackaged):
+ recipedatafile = os.path.join(pkgdata_dir, recipe)
+ if not os.path.exists(recipedatafile):
+ logger.error("Unable to find packaged recipe with name %s" % recipe)
+ sys.exit(1)
+ packages = []
+ with open(recipedatafile, 'r') as f:
+ for line in f:
+ fields = line.rstrip().split(': ')
+ if fields[0] == 'PACKAGES':
+ packages = fields[1].split()
+ break
+
+ if not unpackaged:
+ pkglist = []
+ for pkg in packages:
+ if os.path.exists(os.path.join(pkgdata_dir, 'runtime', '%s.packaged' % pkg)):
+ pkglist.append(pkg)
+ return pkglist
+ else:
+ return packages
+
+def list_pkgs(args):
+ found = False
+
+ def matchpkg(pkg):
+ if args.pkgspec:
+ matched = False
+ for pkgspec in args.pkgspec:
+ if fnmatch.fnmatchcase(pkg, pkgspec):
+ matched = True
+ break
+ if not matched:
+ return False
+ if not args.unpackaged:
+ if args.runtime:
+ revlink = os.path.join(args.pkgdata_dir, "runtime-reverse", pkg)
+ if os.path.exists(revlink):
+ # We're unlikely to get here if the package was not packaged, but just in case
+ # we add the symlinks for unpackaged files in the future
+ mappedpkg = os.path.basename(os.readlink(revlink))
+ if not os.path.exists(os.path.join(args.pkgdata_dir, 'runtime', '%s.packaged' % mappedpkg)):
+ return False
+ else:
+ return False
+ else:
+ if not os.path.exists(os.path.join(args.pkgdata_dir, 'runtime', '%s.packaged' % pkg)):
+ return False
+ return True
+
+ if args.recipe:
+ packages = get_recipe_pkgs(args.pkgdata_dir, args.recipe, args.unpackaged)
+
+ if args.runtime:
+ pkglist = []
+ runtime_pkgs = lookup_pkglist(packages, args.pkgdata_dir, False)
+ for rtpkgs in runtime_pkgs.values():
+ pkglist.extend(rtpkgs)
+ else:
+ pkglist = packages
+
+ for pkg in pkglist:
+ if matchpkg(pkg):
+ found = True
+ print("%s" % pkg)
+ else:
+ if args.runtime:
+ searchdir = 'runtime-reverse'
+ else:
+ searchdir = 'runtime'
+
+ for root, dirs, files in os.walk(os.path.join(args.pkgdata_dir, searchdir)):
+ for fn in files:
+ if fn.endswith('.packaged'):
+ continue
+ if matchpkg(fn):
+ found = True
+ print("%s" % fn)
+ if not found:
+ if args.pkgspec:
+ logger.error("Unable to find any package matching %s" % args.pkgspec)
+ else:
+ logger.error("No packages found")
+ sys.exit(1)
+
+def list_pkg_files(args):
+ import json
+ def parse_pkgdatafile(pkgdatafile, long=False):
+ with open(pkgdatafile, 'r') as f:
+ found = False
+ for line in f:
+ if line.startswith('FILES_INFO:'):
+ found = True
+ val = line.split(':', 1)[1].strip()
+ dictval = json.loads(val)
+ if long:
+ width = max(map(len, dictval), default=0)
+ for fullpth in sorted(dictval):
+ print("\t{:{width}}\t{}".format(fullpth, dictval[fullpth], width=width))
+ else:
+ for fullpth in sorted(dictval):
+ print("\t%s" % fullpth)
+ break
+ if not found:
+ logger.error("Unable to find FILES_INFO entry in %s" % pkgdatafile)
+ sys.exit(1)
+
+
+ if args.recipe:
+ if args.pkg:
+ logger.error("list-pkg-files: If -p/--recipe is specified then a package name cannot be specified")
+ sys.exit(1)
+ recipepkglist = get_recipe_pkgs(args.pkgdata_dir, args.recipe, args.unpackaged)
+ if args.runtime:
+ pkglist = []
+ runtime_pkgs = lookup_pkglist(recipepkglist, args.pkgdata_dir, False)
+ for rtpkgs in runtime_pkgs.values():
+ pkglist.extend(rtpkgs)
+ else:
+ pkglist = recipepkglist
+ else:
+ if not args.pkg:
+ logger.error("list-pkg-files: If -p/--recipe is not specified then at least one package name must be specified")
+ sys.exit(1)
+ pkglist = args.pkg
+
+ for pkg in sorted(pkglist):
+ print("%s:" % pkg)
+ if args.runtime:
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime-reverse", pkg)
+ if not os.path.exists(pkgdatafile):
+ if args.recipe:
+ # This package was empty and thus never packaged, ignore
+ continue
+ logger.error("Unable to find any built runtime package named %s" % pkg)
+ sys.exit(1)
+ parse_pkgdatafile(pkgdatafile, args.long)
+
+ else:
+ providepkgpath = os.path.join(args.pkgdata_dir, "runtime-rprovides", pkg)
+ if os.path.exists(providepkgpath):
+ for f in os.listdir(providepkgpath):
+ if f != pkg:
+ print("%s is in the RPROVIDES of %s:" % (pkg, f))
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", f)
+ parse_pkgdatafile(pkgdatafile, args.long)
+ continue
+ pkgdatafile = os.path.join(args.pkgdata_dir, "runtime", pkg)
+ if not os.path.exists(pkgdatafile):
+ logger.error("Unable to find any built recipe-space package named %s" % pkg)
+ sys.exit(1)
+ parse_pkgdatafile(pkgdatafile, args.long)
+
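+# Search every runtime pkgdata file's FILES_INFO for a matching path, e.g.
+# "oe-pkgdata-util find-path /sbin/mke2fs" reports the package shipping it
+# (something like "e2fsprogs-mke2fs: /sbin/mke2fs").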
+def find_path(args):
+ import json
+
+ found = False
+ for root, dirs, files in os.walk(os.path.join(args.pkgdata_dir, 'runtime')):
+ for fn in files:
+ with open(os.path.join(root,fn)) as f:
+ for line in f:
+ if line.startswith('FILES_INFO:'):
+ val = line.split(':', 1)[1].strip()
+ dictval = json.loads(val)
+ for fullpth in dictval.keys():
+ if fnmatch.fnmatchcase(fullpth, args.targetpath):
+ found = True
+ print("%s: %s" % (fn, fullpth))
+ break
+ if not found:
+ logger.error("Unable to find any package producing path %s" % args.targetpath)
+ sys.exit(1)
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="OpenEmbedded pkgdata tool - queries the pkgdata files written out during do_package",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-p', '--pkgdata-dir', help='Path to pkgdata directory (determined automatically if not specified)')
+ subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ parser_lookup_pkg = subparsers.add_parser('lookup-pkg',
+ help='Translate between recipe-space package names and runtime package names',
+ description='Looks up the specified recipe-space package name(s) to see what the final runtime package name is (e.g. glibc becomes libc6), or with -r/--reverse looks up the other way.')
+ parser_lookup_pkg.add_argument('pkg', nargs='+', help='Package name to look up')
+ parser_lookup_pkg.add_argument('-r', '--reverse', help='Switch to looking up recipe-space package names from runtime package names', action='store_true')
+ parser_lookup_pkg.set_defaults(func=lookup_pkg)
+
+ parser_list_pkgs = subparsers.add_parser('list-pkgs',
+ help='List packages',
+ description='Lists packages that have been built')
+ parser_list_pkgs.add_argument('pkgspec', nargs='*', help='Package name to search for (wildcards * ? allowed, use quotes to avoid shell expansion)')
+ parser_list_pkgs.add_argument('-r', '--runtime', help='Show runtime package names instead of recipe-space package names', action='store_true')
+ parser_list_pkgs.add_argument('-p', '--recipe', help='Limit to packages produced by the specified recipe')
+ parser_list_pkgs.add_argument('-u', '--unpackaged', help='Include unpackaged (i.e. empty) packages', action='store_true')
+ parser_list_pkgs.set_defaults(func=list_pkgs)
+
+ parser_list_pkg_files = subparsers.add_parser('list-pkg-files',
+ help='List files within a package',
+ description='Lists files included in one or more packages')
+ parser_list_pkg_files.add_argument('pkg', nargs='*', help='Package name to report on (if -p/--recipe is not specified)')
+ parser_list_pkg_files.add_argument('-r', '--runtime', help='Specified package(s) are runtime package names instead of recipe-space package names', action='store_true')
+ parser_list_pkg_files.add_argument('-p', '--recipe', help='Report on all packages produced by the specified recipe')
+ parser_list_pkg_files.add_argument('-u', '--unpackaged', help='Include unpackaged (i.e. empty) packages (only useful with -p/--recipe)', action='store_true')
+ parser_list_pkg_files.add_argument('-l', '--long', help='Show more information per file', action='store_true')
+ parser_list_pkg_files.set_defaults(func=list_pkg_files)
+
+ parser_lookup_recipe = subparsers.add_parser('lookup-recipe',
+ help='Find recipe producing one or more packages',
+ description='Looks up the specified runtime package(s) to see which recipe they were produced by')
+ parser_lookup_recipe.add_argument('pkg', nargs='+', help='Runtime package name to look up')
+ parser_lookup_recipe.set_defaults(func=lookup_recipe)
+
+ parser_package_info = subparsers.add_parser('package-info',
+ help='Show version, recipe and size information for one or more packages',
+ description='Looks up the specified runtime package(s) and display information')
+ parser_package_info.add_argument('pkg', nargs='*', help='Runtime package name to look up')
+ parser_package_info.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
+ parser_package_info.add_argument('-e', '--extra', help='Extra variables to display, e.g., LICENSE (can be specified multiple times)', action='append')
+ parser_package_info.set_defaults(func=package_info)
+
+ parser_find_path = subparsers.add_parser('find-path',
+ help='Find package providing a target path',
+ description='Finds the recipe-space package providing the specified target path')
+ parser_find_path.add_argument('targetpath', help='Path to find (wildcards * ? allowed, use quotes to avoid shell expansion)')
+ parser_find_path.set_defaults(func=find_path)
+
+ parser_read_value = subparsers.add_parser('read-value',
+ help='Read any pkgdata value for one or more packages',
+ description='Reads the named value from the pkgdata files for the specified packages')
+ parser_read_value.add_argument('valuename', help='Name of the value to look up')
+ parser_read_value.add_argument('pkg', nargs='*', help='Runtime package name to look up')
+ parser_read_value.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
+ parser_read_value.add_argument('-n', '--prefix-name', help='Prefix output with package name', action='store_true')
+ parser_read_value.add_argument('-u', '--unescape', help='Expand escapes such as \\n', action='store_true')
+ parser_read_value.set_defaults(func=read_value)
+
+ parser_glob = subparsers.add_parser('glob',
+ help='Expand package name glob expression',
+ description='Expands one or more glob expressions over the packages listed in pkglistfile')
+ parser_glob.add_argument('pkglistfile', help='File listing packages (one package name per line)')
+ parser_glob.add_argument('glob', nargs="+", help='Glob expression for package names, e.g. *-dev')
+ parser_glob.add_argument('-x', '--exclude', help='Exclude packages matching specified regex from the glob operation')
+ parser_glob.set_defaults(func=glob)
+
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+
+ if not args.pkgdata_dir:
+ import scriptpath
+ bitbakepath = scriptpath.add_bitbake_lib_path()
+ if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+ logger.debug('Found bitbake path: %s' % bitbakepath)
+ tinfoil = tinfoil_init()
+ try:
+ args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ finally:
+ tinfoil.shutdown()
+ logger.debug('Value of PKGDATA_DIR is "%s"' % args.pkgdata_dir)
+ if not args.pkgdata_dir:
+ logger.error('Unable to determine pkgdata directory from PKGDATA_DIR')
+ sys.exit(1)
+
+ if not os.path.exists(args.pkgdata_dir):
+ logger.error('Unable to find pkgdata directory %s' % args.pkgdata_dir)
+ sys.exit(1)
+
+ ret = args.func(args)
+
+ return ret
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/oe-publish-sdk b/poky/scripts/oe-publish-sdk
new file mode 100755
index 000000000..ee33acf90
--- /dev/null
+++ b/poky/scripts/oe-publish-sdk
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+
+# OpenEmbedded SDK publishing tool
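+#
+# Example (illustrative paths): publish an extensible SDK installer to a
+# remote machine over ssh/scp:
+#   oe-publish-sdk tmp/deploy/sdk/poky-glibc-x86_64-core-image-minimal-toolchain-ext-2.5.sh user@host:/var/www/sdk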
+
+# Copyright (C) 2015-2016 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import re
+import subprocess
+import logging
+import shutil
+import errno
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import argparse_oe
+logger = scriptutils.logger_create('sdktool')
+
+def mkdir(d):
+ try:
+ os.makedirs(d)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise e
+
+def publish(args):
+ logger.debug("In publish function")
+ target_sdk = args.sdk
+ destination = args.dest
+ logger.debug("target_sdk = %s, update_server = %s" % (target_sdk, destination))
+ sdk_basename = os.path.basename(target_sdk)
+
+ # Ensure the SDK exists
+ if not os.path.exists(target_sdk):
+ logger.error("Specified SDK %s doesn't exist" % target_sdk)
+ return -1
+ if os.path.isdir(target_sdk):
+ logger.error("%s is a directory - expected path to SDK installer file" % target_sdk)
+ return -1
+
+ if ':' in destination:
+ is_remote = True
+ host, destdir = destination.split(':')
+ dest_sdk = os.path.join(destdir, sdk_basename)
+ else:
+ is_remote = False
+ dest_sdk = os.path.join(destination, sdk_basename)
+ destdir = destination
+
+ # Making sure the directory exists
+ logger.debug("Making sure the destination directory exists")
+ if not is_remote:
+ mkdir(destination)
+ else:
+ cmd = "ssh %s 'mkdir -p %s'" % (host, destdir)
+ ret = subprocess.call(cmd, shell=True)
+ if ret != 0:
+ logger.error("Making directory %s on %s failed" % (destdir, host))
+ return ret
+
+ # Copying the SDK to the destination
+ logger.info("Copying the SDK to destination")
+ if not is_remote:
+ if os.path.exists(dest_sdk):
+ os.remove(dest_sdk)
+ if (os.stat(target_sdk).st_dev == os.stat(destination).st_dev):
+ os.link(target_sdk, dest_sdk)
+ else:
+ shutil.copy(target_sdk, dest_sdk)
+ else:
+ cmd = "scp %s %s" % (target_sdk, destination)
+ ret = subprocess.call(cmd, shell=True)
+ if ret != 0:
+ logger.error("scp %s %s failed" % (target_sdk, destination))
+ return ret
+
+ # Unpack the SDK
+ logger.info("Unpacking SDK")
+ if not is_remote:
+ cmd = "sh %s -p -y -d %s" % (dest_sdk, destination)
+ ret = subprocess.call(cmd, shell=True)
+ if ret == 0:
+ logger.info('Successfully unpacked %s to %s' % (dest_sdk, destination))
+ os.remove(dest_sdk)
+ else:
+ logger.error('Failed to unpack %s to %s' % (dest_sdk, destination))
+ return ret
+ else:
+ cmd = "ssh %s 'sh %s -p -y -d %s && rm -f %s'" % (host, dest_sdk, destdir, dest_sdk)
+ ret = subprocess.call(cmd, shell=True)
+ if ret == 0:
+ logger.info('Successfully unpacked %s to %s' % (dest_sdk, destdir))
+ else:
+ logger.error('Failed to unpack %s to %s' % (dest_sdk, destdir))
+ return ret
+
+ # Setting up the git repo
+ if not is_remote:
+ cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
+ else:
+ cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc\n*.pyo\npyshtables.py' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
+ ret = subprocess.call(cmd, shell=True)
+ if ret == 0:
+ logger.info('SDK published successfully')
+ else:
+ logger.error('Failed to set up layer git repo')
+ return ret
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="OpenEmbedded extensible SDK publishing tool - writes server-side data to support the extensible SDK update process to a specified location")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+
+ parser.add_argument('sdk', help='Extensible SDK to publish (path to .sh installer file)')
+ parser.add_argument('dest', help='Destination to publish SDK to; can be local path or remote in the form of user@host:/path (in the latter case ssh/scp will be used).')
+
+ parser.set_defaults(func=publish)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ ret = args.func(args)
+ return ret
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/oe-run-native b/poky/scripts/oe-run-native
new file mode 100755
index 000000000..1131122e6
--- /dev/null
+++ b/poky/scripts/oe-run-native
@@ -0,0 +1,68 @@
+#!/bin/bash
+#
+# Copyright (c) 2016, Intel Corporation.
+# All Rights Reserved
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>
+#
+
+#
+# This script is for running tools from the native OE sysroot
+#
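+# Example (illustrative): after "bitbake mtools-native -c addto_recipe_sysroot",
+# "oe-run-native mtools-native mcopy --help" runs the native mcopy binary.
+#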
+
+if [ $# -lt 1 -o "$1" = '--help' -o "$1" = '-h' ] ; then
+ echo 'oe-run-native: the following arguments are required: <native recipe> <native tool>'
+ echo 'Usage: oe-run-native native-recipe tool [parameters]'
+ echo ''
+ echo 'OpenEmbedded run-native - runs native tools'
+ echo ''
+ echo 'arguments:'
+ echo ' native-recipe The recipe that provides the tool'
+ echo ' tool Native tool to run'
+ echo ''
+ exit 2
+fi
+
+native_recipe="$1"
+tool="$2"
+
+if [ "${native_recipe%-native}" = "$native_recipe" ]; then
+ echo Error: $native_recipe is not a native recipe
+ echo Error: Use \"oe-run-native -h\" for help
+ exit 1
+fi
+
+shift
+
+SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
+if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
+ echo "Error: Unable to find oe-find-native-sysroot script"
+ exit 1
+fi
+. $SYSROOT_SETUP_SCRIPT $native_recipe
+
+OLD_PATH=$PATH
+
+# look for a tool only in native sysroot
+PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin
+tool_find=`/usr/bin/which $tool 2>/dev/null`
+
+if [ -n "$tool_find" ] ; then
+ # add old path to allow usage of host tools
+ PATH=$PATH:$OLD_PATH $@
+else
+ echo "Error: Unable to find '$tool' in $PATH"
+ echo "Error: Have you run 'bitbake $native_recipe -caddto_recipe_sysroot'?"
+ exit 1
+fi
diff --git a/poky/scripts/oe-selftest b/poky/scripts/oe-selftest
new file mode 100755
index 000000000..1bf860a41
--- /dev/null
+++ b/poky/scripts/oe-selftest
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2013-2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# DESCRIPTION
+# This script runs tests defined in meta/lib/oeqa/selftest/
+# Its purpose is to automate the testing of different bitbake tools.
+# To use it you just need to source your build environment setup script and
+# add the meta-selftest layer to your BBLAYERS.
+# Call the script as: "oe-selftest -a" to run all the tests in meta/lib/oeqa/selftest/
+# Call the script as: "oe-selftest -r <module>.<Class>.<method>" to run just a single test
+# E.g.: "oe-selftest -r bblayers.BitbakeLayers" will run just the BitbakeLayers class from meta/lib/oeqa/selftest/bblayers.py
+
+
+
+import os
+import sys
+import argparse
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+import scriptpath
+scriptpath.add_oe_lib_path()
+scriptpath.add_bitbake_lib_path()
+
+from oeqa.utils import load_test_components
+from oeqa.core.exception import OEQAPreRun
+
+logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout)
+
+def main():
+ description = "Script that runs unit tests against bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
+ parser = argparse_oe.ArgumentParser(description=description)
+
+ comp_name, comp = load_test_components(logger, 'oe-selftest').popitem()
+ comp.register_commands(logger, parser)
+
+ try:
+ args = parser.parse_args()
+ results = args.func(logger, args)
+ ret = 0 if results.wasSuccessful() else 1
+ except SystemExit as err:
+ if err.code != 0:
+ raise err
+ ret = err.code
+ except OEQAPreRun as pr:
+ ret = 1
+
+ return ret
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/oe-setup-builddir b/poky/scripts/oe-setup-builddir
new file mode 100755
index 000000000..55d73ca1e
--- /dev/null
+++ b/poky/scripts/oe-setup-builddir
@@ -0,0 +1,140 @@
+#!/bin/sh
+
+# OE Build Environment Setup Script
+#
+# Copyright (C) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+if [ -z "$BUILDDIR" ]; then
+ echo >&2 "Error: The build directory (BUILDDIR) must be set!"
+ exit 1
+fi
+
+if [ "$1" = '--help' -o "$1" = '-h' ]; then
+ echo 'Usage: oe-setup-builddir'
+ echo ''
+ echo "OpenEmbedded setup-builddir - setup build directory $BUILDDIR"
+ echo ''
+ exit 2
+fi
+
+mkdir -p "$BUILDDIR/conf"
+
+if [ ! -d "$BUILDDIR" ]; then
+ echo >&2 "Error: The builddir ($BUILDDIR) does not exist!"
+ exit 1
+fi
+
+if [ ! -w "$BUILDDIR" ]; then
+ echo >&2 "Error: Cannot write to $BUILDDIR, perhaps try sourcing with a writable path? i.e. . oe-init-build-env ~/my-build"
+ exit 1
+fi
+
+# Attempt to remove the sticky and setuid bits from BUILDDIR and BUILDDIR/conf
+chmod -st "$BUILDDIR" 2>/dev/null || echo "WARNING: unable to chmod $BUILDDIR"
+chmod -st "$BUILDDIR/conf" 2>/dev/null || echo "WARNING: unable to chmod $BUILDDIR/conf"
+
+cd "$BUILDDIR"
+
+if [ -f "$BUILDDIR/conf/templateconf.cfg" ]; then
+ TEMPLATECONF=$(cat "$BUILDDIR/conf/templateconf.cfg")
+fi
+
+. $OEROOT/.templateconf
+
+if [ ! -f "$BUILDDIR/conf/templateconf.cfg" ]; then
+ echo "$TEMPLATECONF" >"$BUILDDIR/conf/templateconf.cfg"
+fi
+
+#
+# $TEMPLATECONF can point to a directory for the template local.conf & bblayers.conf
+#
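+# e.g. poky's .templateconf sets TEMPLATECONF=meta-poky/conf by default; a
+# BSP layer can likewise ship samples and set TEMPLATECONF=meta-xyz/conf
+# (hypothetical layer name, matching the shortcut handled below).
+#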
+if [ -n "$TEMPLATECONF" ]; then
+ if [ ! -d "$TEMPLATECONF" ]; then
+ # Allow TEMPLATECONF=meta-xyz/conf as a shortcut
+ if [ -d "$OEROOT/$TEMPLATECONF" ]; then
+ TEMPLATECONF="$OEROOT/$TEMPLATECONF"
+ fi
+ if [ ! -d "$TEMPLATECONF" ]; then
+ echo >&2 "Error: TEMPLATECONF value points to nonexistent directory '$TEMPLATECONF'"
+ exit 1
+ fi
+ fi
+ OECORELAYERCONF="$TEMPLATECONF/bblayers.conf.sample"
+ OECORELOCALCONF="$TEMPLATECONF/local.conf.sample"
+ OECORENOTESCONF="$TEMPLATECONF/conf-notes.txt"
+fi
+
+unset SHOWYPDOC
+if [ -z "$OECORELOCALCONF" ]; then
+ OECORELOCALCONF="$OEROOT/meta/conf/local.conf.sample"
+fi
+if [ ! -r "$BUILDDIR/conf/local.conf" ]; then
+ cat <<EOM
+You had no conf/local.conf file. This configuration file has therefore been
+created for you with some default values. You may wish to edit it to, for
+example, select a different MACHINE (target hardware). See conf/local.conf
+for more information as common configuration options are commented.
+
+EOM
+ cp -f $OECORELOCALCONF "$BUILDDIR/conf/local.conf"
+ SHOWYPDOC=yes
+fi
+
+if [ -z "$OECORELAYERCONF" ]; then
+ OECORELAYERCONF="$OEROOT/meta/conf/bblayers.conf.sample"
+fi
+if [ ! -r "$BUILDDIR/conf/bblayers.conf" ]; then
+ cat <<EOM
+You had no conf/bblayers.conf file. This configuration file has therefore been
+created for you with some default values. To add additional metadata layers
+into your configuration please add entries to conf/bblayers.conf.
+
+EOM
+
+ # Put the absolute path to the layers in bblayers.conf so we can run
+ # bitbake without the init script after the first run.
+ # ##COREBASE## is deprecated as its meaning was inconsistent, but continue
+ # to replace it for compatibility.
+ sed -e "s|##OEROOT##|$OEROOT|g" \
+ -e "s|##COREBASE##|$OEROOT|g" \
+ $OECORELAYERCONF > "$BUILDDIR/conf/bblayers.conf"
+ SHOWYPDOC=yes
+fi
+
+# Prevent disturbing a new GIT clone in same console
+unset OECORELOCALCONF
+unset OECORELAYERCONF
+
+# Ending the first-time run message. Show the YP Documentation banner.
+if [ ! -z "$SHOWYPDOC" ]; then
+ cat <<EOM
+The Yocto Project has extensive documentation about OE including a reference
+manual which can be found at:
+ http://yoctoproject.org/documentation
+
+For more information about OpenEmbedded see their website:
+ http://www.openembedded.org/
+
+EOM
+# unset SHOWYPDOC
+fi
+
+if [ -z "$OECORENOTESCONF" ]; then
+ OECORENOTESCONF="$OEROOT/meta/conf/conf-notes.txt"
+fi
+[ ! -r "$OECORENOTESCONF" ] || cat $OECORENOTESCONF
+unset OECORENOTESCONF
diff --git a/poky/scripts/oe-test b/poky/scripts/oe-test
new file mode 100755
index 000000000..34d9012d1
--- /dev/null
+++ b/poky/scripts/oe-test
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+
+# OpenEmbedded test tool
+#
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import argparse
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+
+# oe-test is also used for testexport, which does not ship the oe libs,
+# so just skip adding these libraries there (they are not used in testexport)
+try:
+ import scriptpath
+ scriptpath.add_oe_lib_path()
+except ImportError:
+ pass
+
+from oeqa.utils import load_test_components
+from oeqa.core.exception import OEQAPreRun
+
+logger = scriptutils.logger_create('oe-test', stream=sys.stdout)
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="OpenEmbedded test tool",
+ add_help=False,
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ global_args, unparsed_args = parser.parse_known_args()
+
+ # Help is added here rather than via add_help=True, as we don't want it to
+ # be handled by parse_known_args()
+ parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ if global_args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif global_args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ components = load_test_components(logger, 'oe-test')
+
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.add_subparser_group('components', 'Test components')
+ subparsers.required = True
+ for comp_name in sorted(components.keys()):
+ comp = components[comp_name]
+ comp.register_commands(logger, subparsers)
+
+ try:
+ args = parser.parse_args(unparsed_args, namespace=global_args)
+ results = args.func(logger, args)
+ ret = 0 if results.wasSuccessful() else 1
+ except SystemExit as err:
+ if err.code != 0:
+ raise err
+ ret = err.code
+ except argparse_oe.ArgumentUsageError as ae:
+ parser.error_subcommand(ae.message, ae.subcommand)
+ except OEQAPreRun as pr:
+ ret = 1
+
+ return ret
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/oe-trim-schemas b/poky/scripts/oe-trim-schemas
new file mode 100755
index 000000000..7c199ef1d
--- /dev/null
+++ b/poky/scripts/oe-trim-schemas
@@ -0,0 +1,58 @@
+#! /usr/bin/env python3
+
+import sys
+try:
+ import xml.etree.cElementTree as etree
+except:
+ import xml.etree.ElementTree as etree
+
+def child (elem, name):
+ for e in elem.getchildren():
+ if e.tag == name:
+ return e
+ return None
+
+def children (elem, name=None):
+ l = elem.getchildren()
+ if name:
+ l = [e for e in l if e.tag == name]
+ return l
+
+if len(sys.argv) < 2 or sys.argv[1] in ('-h', '--help'):
+ print('oe-trim-schemas: error: the following arguments are required: schema\n'
+ 'Usage: oe-trim-schemas schema\n\n'
+ 'OpenEmbedded trim schemas - remove unneeded schema locale translations\n'
+ ' from gconf schema files\n\n'
+ 'arguments:\n'
+ ' schema gconf schema file to trim\n')
+ sys.exit(2)
+
+xml = etree.parse(sys.argv[1])
+
+for schema in child(xml.getroot(), "schemalist").getchildren():
+ e = child(schema, "short")
+ if e is not None:
+ schema.remove(e)
+
+ e = child(schema, "long")
+ if e is not None:
+ schema.remove(e)
+
+ for locale in children(schema, "locale"):
+ # One locale must exist so leave C locale...
+ a = locale.attrib.get("name")
+ if a == 'C':
+ continue
+ e = child(locale, "default")
+ if e is None:
+ schema.remove(locale)
+ else:
+ e = child(locale, "short")
+ if e is not None:
+ locale.remove(e)
+ e = child(locale, "long")
+ if e is not None:
+ locale.remove(e)
+
+xml.write(sys.stdout, "UTF-8")
+
diff --git a/poky/scripts/oepydevshell-internal.py b/poky/scripts/oepydevshell-internal.py
new file mode 100755
index 000000000..04621ae8a
--- /dev/null
+++ b/poky/scripts/oepydevshell-internal.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import time
+import select
+import fcntl
+import termios
+import readline
+import signal
+
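+# Terminal helpers: make an fd non-blocking, and switch the controlling tty
+# between cooked mode (echo + canonical input) and cbreak mode (no echo, no
+# line buffering) so select() wakes on each keypress.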
+def nonblockingfd(fd):
+ fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+def echonocbreak(fd):
+ old = termios.tcgetattr(fd)
+ old[3] = old[3] | termios.ECHO | termios.ICANON
+ termios.tcsetattr(fd, termios.TCSADRAIN, old)
+
+def cbreaknoecho(fd):
+ old = termios.tcgetattr(fd)
+ old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
+ termios.tcsetattr(fd, termios.TCSADRAIN, old)
+
+if len(sys.argv) != 3 or sys.argv[1] in ('-h', '--help'):
+ print('oepydevshell-internal.py: error: the following arguments are required: pty, pid\n'
+ 'Usage: oepydevshell-internal.py pty pid\n\n'
+ 'OpenEmbedded oepydevshell-internal.py - internal script called from meta/classes/devshell.bbclass\n\n'
+ 'arguments:\n'
+ ' pty pty device name\n'
+ ' pid parent process id\n\n'
+ 'options:\n'
+ ' -h, --help show this help message and exit\n')
+ sys.exit(2)
+
+pty = open(sys.argv[1], "w+b", 0)
+parent = int(sys.argv[2])
+
+nonblockingfd(pty)
+nonblockingfd(sys.stdin)
+
+
+histfile = os.path.expanduser("~/.oedevpyshell-history")
+readline.parse_and_bind("tab: complete")
+try:
+ readline.read_history_file(histfile)
+except IOError:
+ pass
+
+try:
+
+ i = ""
+ o = ""
+ # Need cbreak/noecho whilst in select so we trigger on any keypress
+ cbreaknoecho(sys.stdin.fileno())
+ # Send our PID to the other end so they can kill us.
+ pty.write(str(os.getpid()).encode('utf-8') + b"\n")
+ while True:
+ try:
+ writers = []
+ if i:
+ writers.append(sys.stdout)
+ (ready, _, _) = select.select([pty, sys.stdin], writers, [], 0)
+ try:
+ if pty in ready:
+ i = i + pty.read().decode('utf-8')
+ if i:
+ # Write a page (4096 bytes) at a time to avoid overflowing the output
+ sys.stdout.write(i[:4096])
+ sys.stdout.flush()
+ i = i[4096:]
+ if sys.stdin in ready:
+ echonocbreak(sys.stdin.fileno())
+ o = input().encode('utf-8')
+ cbreaknoecho(sys.stdin.fileno())
+ pty.write(o + b"\n")
+ except (IOError, OSError) as e:
+ if e.errno == 11:
+ continue
+ if e.errno == 5:
+ sys.exit(0)
+ raise
+ except EOFError:
+ sys.exit(0)
+ except KeyboardInterrupt:
+ os.kill(parent, signal.SIGINT)
+
+except SystemExit:
+ pass
+except Exception as e:
+ import traceback
+ print("Exception in oepydehshell-internal: " + str(e))
+ traceback.print_exc()
+ time.sleep(5)
+finally:
+ readline.write_history_file(histfile)
diff --git a/poky/scripts/opkg-query-helper.py b/poky/scripts/opkg-query-helper.py
new file mode 100755
index 000000000..ce89491f6
--- /dev/null
+++ b/poky/scripts/opkg-query-helper.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+
+# OpenEmbedded opkg query helper utility
+#
+# Written by: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# Copyright 2012 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+#
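+# Example (hypothetical status-file input):
+#   Package: libfoo1
+#   Version: 1.0-r0
+#   Architecture: armv7a
+# With -v this prints "libfoo1 armv7a 1.0-r0"; with no mode flag it emits one
+# "package|dependency" line per entry in the Depends: field.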
+
+
+import sys
+import fileinput
+import re
+
+archmode = False
+filemode = False
+vermode = False
+
+args = []
+for arg in sys.argv[1:]:
+ if arg == '-a':
+ archmode = True
+ elif arg == '-f':
+ filemode = True
+ elif arg == '-v':
+ vermode = True
+ else:
+ args.append(arg)
+
+# Regex for removing version specs after dependency items
+verregex = re.compile(' \([=<>]* [^ )]*\)')
+
+pkg = ""
+ver = ""
+for line in fileinput.input(args):
+ line = line.rstrip()
+ if ': ' in line:
+ if line.startswith("Package:"):
+ pkg = line.split(": ")[1]
+ ver = ""
+ else:
+ if archmode:
+ if line.startswith("Architecture:"):
+ arch = line.split(": ")[1]
+ print("%s %s" % (pkg,arch))
+ elif filemode:
+ if line.startswith("Version:"):
+ ver = line.split(": ")[1]
+ elif line.startswith("Architecture:"):
+ arch = line.split(": ")[1]
+ print("%s %s_%s_%s.ipk %s" % (pkg,pkg,ver,arch,arch))
+ elif vermode:
+ if line.startswith("Version:"):
+ ver = line.split(": ")[1]
+ elif line.startswith("Architecture:"):
+ arch = line.split(": ")[1]
+ print("%s %s %s" % (pkg,arch,ver))
+ else:
+ if line.startswith("Depends:"):
+ depval = line.split(": ")[1]
+ deps = depval.split(", ")
+ for dep in deps:
+ dep = verregex.sub('', dep)
+ print("%s|%s" % (pkg,dep))
+ elif line.startswith("Recommends:"):
+ recval = line.split(": ")[1]
+ recs = recval.split(", ")
+ for rec in recs:
+ rec = verregex.sub('', rec)
+ print("%s|%s [REC]" % (pkg, rec))
+
diff --git a/poky/scripts/postinst-intercepts/delay_to_first_boot b/poky/scripts/postinst-intercepts/delay_to_first_boot
new file mode 100644
index 000000000..ecdbef95d
--- /dev/null
+++ b/poky/scripts/postinst-intercepts/delay_to_first_boot
@@ -0,0 +1,2 @@
+#!/bin/sh
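+# Fail on purpose: a non-zero exit from this intercept causes the postinsts
+# hooked to it to be deferred to first boot on the target.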
+exit 1
diff --git a/poky/scripts/postinst-intercepts/postinst_intercept b/poky/scripts/postinst-intercepts/postinst_intercept
new file mode 100755
index 000000000..b18e806d4
--- /dev/null
+++ b/poky/scripts/postinst-intercepts/postinst_intercept
@@ -0,0 +1,56 @@
+#!/bin/sh
+#
+# This script is called from inside postinstall scriptlets at do_rootfs time. It
+# appends the calling package to the list of packages for which the intercept
+# script is valid. If any variables need to be passed to the intercept script
+# from the postinstall itself, they are added immediately after the shebang line.
+#
+# Usage: postinst_intercept <intercept_script_name> <package_name> <mlprefix=...> <var1=...> ... <varN=...>
+# * intercept_script_name - the name of the intercept script we want to change;
+# * package_name - add the package_name to list of packages the intercept script
+# is used for;
+# * mlprefix=... - this one is needed in order to have separate hooks for multilib.
+# * var1=... - var1 will have the value we provide in the intercept script. This
+# is useful when we want to pass on variables like ${libdir} to
+# the intercept script;
+#
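+# A hypothetical invocation from a postinstall scriptlet:
+#   postinst_intercept update_font_cache fontconfig mlprefix= libdir=/usr/lib
+# On first use this injects "libdir=/usr/lib" after the hook's shebang and
+# creates its ##PKGS: line; later calls just append the package name.
+#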
+[ $# -lt 3 ] && exit 1
+
+intercept_script=$INTERCEPT_DIR/$1 && shift
+package_name=$1 && shift
+mlprefix=$(echo $1 |sed -ne "s/^mlprefix=\(.*\)-/\1/p") && shift
+
+# if the hook we want to install does not exist, then there's nothing we can do
+[ -f "$intercept_script" ] || exit 1
+
+# if the postinstall wanting to install the hook belongs to a multilib package,
+# then we'd better have a separate hook for this because the default ${libdir} and
+# ${base_libdir} will point to the wrong locations
+if [ -n "$mlprefix" ]; then
+ ml_intercept_script=$intercept_script-$mlprefix
+ # if the multilib hook does not exist, create it from the default one
+ if [ ! -f "$ml_intercept_script" ]; then
+ cp $intercept_script $ml_intercept_script
+
+ # clear the ##PKGS: line and the already set variables
+ [ -x "$ml_intercept_script" ] && sed -i -e "2,$(($#+1)) {/.*/d}" -e "/^##PKGS: .*/d" $ml_intercept_script
+ fi
+
+ intercept_script=$ml_intercept_script
+fi
+
+chmod +x "$intercept_script"
+
+pkgs_line=$(grep "##PKGS:" $intercept_script)
+if [ -n "$pkgs_line" ]; then
+ # line exists, add this package to the list only if it's not already there
+ if [ -z "$(echo "$pkgs_line" | grep " $package_name ")" ]; then
+ sed -i -e "s/##PKGS:.*/\0${package_name} /" $intercept_script
+ fi
+else
+ for var in "$@"; do
+ sed -i -e "\%^#\!/bin/.*sh%a $var" $intercept_script
+ done
+ echo "##PKGS: ${package_name} " >> $intercept_script
+fi
+
diff --git a/poky/scripts/postinst-intercepts/update_font_cache b/poky/scripts/postinst-intercepts/update_font_cache
new file mode 100644
index 000000000..bf65e19a4
--- /dev/null
+++ b/poky/scripts/postinst-intercepts/update_font_cache
@@ -0,0 +1,7 @@
+#!/bin/sh
+
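+# Refresh the fontconfig cache in the staged rootfs ($D) by running the
+# target fc-cache under qemu user mode (qemuwrapper), outside of pseudo.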
+set -e
+
+PSEUDO_UNLOAD=1 qemuwrapper -L $D -E LD_LIBRARY_PATH=$D/${libdir}:$D/${base_libdir} \
+ -E ${fontconfigcacheenv} $D${bindir}/fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
+chown -R root:root $D${fontconfigcachedir}
diff --git a/poky/scripts/postinst-intercepts/update_gio_module_cache b/poky/scripts/postinst-intercepts/update_gio_module_cache
new file mode 100644
index 000000000..fc3f9d0d6
--- /dev/null
+++ b/poky/scripts/postinst-intercepts/update_gio_module_cache
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+set -e
+
+PSEUDO_UNLOAD=1 qemuwrapper -L $D -E LD_LIBRARY_PATH=$D${libdir}:$D${base_libdir} \
+ $D${libexecdir}/${binprefix}gio-querymodules $D${libdir}/gio/modules/
+
+[ ! -e $D${libdir}/gio/modules/giomodule.cache ] ||
+ chown root:root $D${libdir}/gio/modules/giomodule.cache
diff --git a/poky/scripts/postinst-intercepts/update_icon_cache b/poky/scripts/postinst-intercepts/update_icon_cache
new file mode 100644
index 000000000..9cf2a72a0
--- /dev/null
+++ b/poky/scripts/postinst-intercepts/update_icon_cache
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+# update native pixbuf loaders
+$STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
+
+for icondir in $D/usr/share/icons/*/ ; do
+ if [ -d $icondir ] ; then
+ gtk-update-icon-cache -fqt $icondir
+ fi
+done
+
diff --git a/poky/scripts/postinst-intercepts/update_pixbuf_cache b/poky/scripts/postinst-intercepts/update_pixbuf_cache
new file mode 100644
index 000000000..5d44075fb
--- /dev/null
+++ b/poky/scripts/postinst-intercepts/update_pixbuf_cache
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+export GDK_PIXBUF_MODULEDIR=$D${libdir}/gdk-pixbuf-2.0/2.10.0/loaders
+export GDK_PIXBUF_FATAL_LOADER=1
+
+PSEUDO_UNLOAD=1 qemuwrapper -L $D -E LD_LIBRARY_PATH=$D/${libdir}:$D/${base_libdir}\
+ $D${libdir}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders \
+ >$GDK_PIXBUF_MODULEDIR/../loaders.cache && \
+ sed -i -e "s:$D::g" $GDK_PIXBUF_MODULEDIR/../loaders.cache
diff --git a/poky/scripts/pybootchartgui/AUTHORS b/poky/scripts/pybootchartgui/AUTHORS
new file mode 100644
index 000000000..672b7e952
--- /dev/null
+++ b/poky/scripts/pybootchartgui/AUTHORS
@@ -0,0 +1,11 @@
+Michael Meeks <michael.meeks@novell.com>
+Anders Norgaard <anders.norgaard@gmail.com>
+Scott James Remnant <scott@ubuntu.com>
+Henning Niss <henningniss@gmail.com>
+Riccardo Magliocchetti <riccardo.magliocchetti@gmail.com>
+
+Contributors:
+ Brian Ewins
+
+Based on work by:
+ Ziga Mahkovec
diff --git a/poky/scripts/pybootchartgui/COPYING b/poky/scripts/pybootchartgui/COPYING
new file mode 100644
index 000000000..ed87acf94
--- /dev/null
+++ b/poky/scripts/pybootchartgui/COPYING
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/poky/scripts/pybootchartgui/MAINTAINERS b/poky/scripts/pybootchartgui/MAINTAINERS
new file mode 100644
index 000000000..c65e1315f
--- /dev/null
+++ b/poky/scripts/pybootchartgui/MAINTAINERS
@@ -0,0 +1,3 @@
+Riccardo Magliocchetti <riccardo.magliocchetti@gmail.com>
+Michael Meeks <michael.meeks@novell.com>
+Harald Hoyer <harald@redhat.com>
diff --git a/poky/scripts/pybootchartgui/NEWS b/poky/scripts/pybootchartgui/NEWS
new file mode 100644
index 000000000..7c5b2fc3a
--- /dev/null
+++ b/poky/scripts/pybootchartgui/NEWS
@@ -0,0 +1,204 @@
+bootchart2 0.14.5:
+ + pybootchartgui (Riccardo)
+ + Fix tests with python3
+ + Fix parsing of files with non-ascii bytes
+ + Robustness fixes to taskstats and meminfo parsing
+ + More python3 fixes
+
+bootchart2 0.14.4:
+ + bootchartd
+ + Add relevant EXIT_PROC for GNOME3, XFCE4, openbox
+ (Justin Lecher, Ben Eills)
+ + pybootchartgui (Riccardo)
+ + Fix some issues in --crop-after and --annotate
+ + Fix pybootchartgui process_tree tests
+ + More python3 fixes
+
+bootchart2 0.14.2:
+ + pybootchartgui
+ + Fix some crashes in parsing.py (Jakub Czaplicki, Riccardo)
+ + speed up meminfo parsing a bit (Riccardo)
+ + Fix indentation for python3.2 (Riccardo)
+
+bootchart2 0.14.1:
+ + bootchartd
+ + Expect dmesg only if started as init (Henry Yei)
+ + look for bootchart_init in the environment (Henry Gebhardt)
+ + pybootchartgui
+ + Fixup some tests (Riccardo)
+ + Support hp smart arrays block devices (Anders Norgaard,
+ Brian Murray)
+ + Fixes for -t, -o and -f options (Mladen Kuntner, Harald, Riccardo)
+
+bootchart2 0.14.0:
+ + bootchartd
+ + Add ability to define custom commands
+ (Lucian Muresan, Peter Hjalmarsson)
+ + collector
+ + fix tmpfs mount leakage (Peter Hjalmarsson)
+ + pybootchartgui
+ + render cumulative I/O time chart (Sankar P)
+ + python3 compatibility fixes (Riccardo)
+ + Misc (Michael)
+ + remove confusing, obsolete setup.py
+ + install docs to /usr/share/
+ + lot of fixes for easier packaging (Peter Hjalmarsson)
+ + add bootchart2, bootchartd and pybootchartgui manpages
+ (Francesca Ciceri, David Paleino)
+
+bootchart2 0.12.6:
+ + bootchartd
+ + better check for initrd (Riccardo Magliocchetti)
+ + code cleanup (Riccardo)
+ + make the list of processes we are waiting for editable
+ in config file by EXIT_PROC (Riccardo)
+ + fix parsing of cmdline for alternative init system (Riccardo)
+ + fixed calling init in initramfs (Harald)
+ + exit 0 for start, if the collector is already running (Harald)
+ + collector
+ + try harder with taskstats (Michael)
+ + plug some small leaks (Riccardo)
+ + fix missing PROC_EVENTS detection (Harald)
+ + pybootchartgui (Michael)
+ + add kernel bootchart tab to interactive gui
+ + report bootchart version in cli interface
+ + improve rendering performance
+ + GUI improvements
+ + lot of cleanups
+ + Makefile
+ + do not python compile if NO_PYTHON_COMPILE is set (Harald)
+ + systemd service files
+ + added them and install (Harald, Wulf C. Krueger)
+
+bootchart2 0.12.5:
+ + administrative snafu version; pull before pushing...
+
+bootchart2 0.12.4:
+ + bootchartd
+ + reduce overhead caused by pidof (Riccardo Magliocchetti)
+ + collector
+ + attempt to retry ptrace to avoid bogus ENOSYS (Michael)
+ + add meminfo polling (Dave Martin)
+ + pybootchartgui
+ + handle dmesg timestamps with big delta (Riccardo)
+ + avoid divide by zero when rendering I/O utilization (Riccardo)
+ + add process grouping in the cumulative chart (Riccardo)
+ + fix cpu time calculation in cumulative chart (Riccardo)
+ + get i/o statistics for flash based devices (Riccardo)
+ + prettier coloring for the cumulative graphs (Michael)
+ + fix interactive CPU rendering (Michael)
+ + render memory usage graph (Dave Martin)
+
+bootchart2 0.12.3
+ + collector
+ + pclose after popen (Riccardo Magliocchetti (xrmx))
+ + fix buffer overflow (xrmx)
+ + count 'processor:' in /proc/cpuinfo for ARM (Michael)
+ + get model name from that line too for ARM (xrmx)
+ + store /proc/cpuinfo in the boot-chart archive (xrmx)
+ + try harder to detect missing TASKSTATS (Michael)
+ + sanity-check invalid domain names (Michael)
+ + detect missing PROC_EVENTS more reliably (Michael)
+ + README fixes (xrmx, Michael)
+ + pybootchartgui
+ + make num_cpu parsing robust (Michael)
+
+bootchart2 0.12.2
+ + fix pthread compile / linking bug
+
+bootchart2 0.12.1
+ + pybootchartgui
+ + pylint cleanup
+ + handle empty traces more elegantly
+ + add '-t' / '--boot-time' argument (Matthew Bauer)
+ + collector
+ + now GPLv2
+ + add rdinit support for very early initrd tracing
+ + cleanup / re-factor code into separate modules
+ + re-factor arg parsing, and parse remote process args
+ + handle missing bootchartd.conf cleanly
+ + move much of bootchartd from shell -> C
+ + drop dmesg and uname usage
+ + avoid rpm/dpkg with native version reporting
+
+bootchart2 0.12.0 (Michael Meeks)
+ + collector
+ + use netlink PROC_EVENTS to generate parentage data
+ + finally kills any need for 'acct' et al.
+ + also removes need to poll /proc => faster
+ + cleanup code to K&R, 8 stop tabs.
+ + pybootchartgui
+ + consume thread parentage data
+
+bootchart2 0.11.4 (Michael Meeks)
+ + collector
+ + if run inside an initrd detect when /dev is writable
+ and remount ourselves into that.
+ + overflow buffers more elegantly in extremis
+ + dump full process path and command-line args
+ + calm down debugging output
+ + pybootchartgui
+ + can render logs in a directory again
+ + has a 'show more' option to show command-lines
+
+bootchart2 0.11.3 (Michael Meeks)
+ + add $$ display to the bootchart header
+ + process command-line bits
+ + fix collection code, and rename stream to match
+ + enable parsing, add check button to UI, and --show-all
+ command-line option
+ + fix parsing of directories full of files.
+
+bootchart2 0.11.2 (Michael Meeks)
+ + fix initrd sanity check to use the right proc path
+ + don't return a bogus error value when dumping state
+ + add -c to aid manual console debugging
+
+bootchart2 0.11.1 (Michael Meeks)
+ + even simpler initrd setup
+ + create a single directory: /lib/bootchart/tmpfs
+
+bootchart2 0.11 (Michael Meeks)
+ + bootchartd
+ + far, far simpler, less shell, more robustness etc.
+ + bootchart-collector
+ + remove the -p argument - we always mount proc
+ + requires /lib/bootchart (make install-chroot) to
+ be present (also in the initrd) [ with a kmsg
+ node included ]
+ + add a --probe-running mode
+ + ptrace re-write
+ + gives -much- better early-boot-time resolution
+ + unconditional chroot /lib/bootchart/chroot
+ + we mount proc there ourselves
+ + log extraction requires no common file-system view
+
+
+bootchart2 0.10.1 (Kel Modderman)
+ + collector arg -m should mount /proc
+ + remove bogus vcsid code
+ + split collector install in Makefile
+ + remove bogus debug code
+ + accept process names containing spaces
+
+bootchart2 0.10.0
+ + rendering (Anders Norgaard)
+ + fix for unknown exceptions
+ + interactive UI (Michael)
+ + much faster rendering by manual clipping
+ + horizontal scaling
+ + remove annoying page-up/down bindings
+ + initrd portability & fixes (Federic Crozat)
+ + port to Mandriva
+ + improved process waiting
+ + inittab commenting fix
+ + improved initrd detection / jail tagging
+ + fix for un-detectable accton behaviour change
+ + implement a built-in usleep to help initrd deps (Michael)
+
+bootchart2 0.0.9
+ + fix initrd bug
+
+bootchart2 0.0.8
+ + add a filename string to the window title in interactive mode
+ + add a NEWS file
diff --git a/poky/scripts/pybootchartgui/README.pybootchart b/poky/scripts/pybootchartgui/README.pybootchart
new file mode 100644
index 000000000..8642e6467
--- /dev/null
+++ b/poky/scripts/pybootchartgui/README.pybootchart
@@ -0,0 +1,37 @@
+ PYBOOTCHARTGUI
+ ----------------
+
+pybootchartgui is a tool (now included as part of bootchart2) for
+visualization and analysis of the GNU/Linux boot process. It renders
+the output of the boot-logger tool bootchart (see
+http://www.bootchart.org/) to either the screen or files of various
+formats. Bootchart collects information about the processes, their
+dependencies, and resource consumption during boot of a GNU/Linux
+system. The pybootchartgui tool visualizes the process tree and
+overall resource utilization.
+
+pybootchartgui is a port of the visualization part of bootchart from
+Java to Python and Cairo.
+
+Adapted from the bootchart documentation:
+
+ The CPU and disk statistics are used to render stacked area and line
+ charts. The process information is used to create a Gantt chart
+ showing process dependency, states and CPU usage.
+
+ A typical boot sequence consists of several hundred processes. Since
+ it is difficult to visualize such an amount of data in a comprehensible
+ way, tree pruning is used. Idle background processes and
+ short-lived processes are removed. Similar processes running in
+ parallel are also merged together.
+
+ Finally, the performance and dependency charts are rendered as a
+ single image, either to the screen or to a file in PNG, PDF or SVG format.
+
+
+To get help for pybootchartgui, run
+
+$ pybootchartgui --help
+
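+A typical invocation renders a collected log archive straight to a file
+(archive name and output path are illustrative):
+
+$ pybootchartgui -f svg -o bootchart.svg bootchart.tgz
+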
+This code was originally hosted at:
+ http://code.google.com/p/pybootchartgui/
diff --git a/poky/scripts/pybootchartgui/pybootchartgui.py b/poky/scripts/pybootchartgui/pybootchartgui.py
new file mode 100755
index 000000000..7ce1a5be4
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+#
+# This file is part of pybootchartgui.
+
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+
+import sys
+from pybootchartgui.main import main
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/__init__.py b/poky/scripts/pybootchartgui/pybootchartgui/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/__init__.py
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/batch.py b/poky/scripts/pybootchartgui/pybootchartgui/batch.py
new file mode 100644
index 000000000..05c714e95
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/batch.py
@@ -0,0 +1,46 @@
+# This file is part of pybootchartgui.
+
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+import cairo
+from . import draw
+from .draw import RenderOptions
+
+def render(writer, trace, app_options, filename):
+ handlers = {
+ "png": (lambda w, h: cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h), \
+ lambda sfc: sfc.write_to_png(filename)),
+ "pdf": (lambda w, h: cairo.PDFSurface(filename, w, h), lambda sfc: 0),
+ "svg": (lambda w, h: cairo.SVGSurface(filename, w, h), lambda sfc: 0)
+ }
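+ # PDF and SVG surfaces are bound to `filename` at creation and written out
+ # when the surface is finalized, so their writer entry is a no-op; only PNG
+ # needs an explicit write_to_png() once drawing is done.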
+
+ if app_options.format is None:
+ fmt = filename.rsplit('.', 1)[-1] # tolerate filenames without an extension
+ else:
+ fmt = app_options.format
+
+ if fmt not in handlers:
+ writer.error ("Unknown format '%s'." % fmt)
+ return 10
+
+ make_surface, write_surface = handlers[fmt]
+ options = RenderOptions (app_options)
+ (w, h) = draw.extents (options, 1.0, trace)
+ w = max (w, draw.MIN_IMG_W)
+ surface = make_surface (w, h)
+ ctx = cairo.Context (surface)
+ draw.render (ctx, options, 1.0, trace)
+ write_surface (surface)
+ writer.status ("bootchart written to '%s'" % filename)
+
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/draw.py b/poky/scripts/pybootchartgui/pybootchartgui/draw.py
new file mode 100644
index 000000000..201ce4577
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -0,0 +1,968 @@
+# This file is part of pybootchartgui.
+
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+
+import cairo
+import math
+import re
+import random
+import colorsys
+from operator import itemgetter
+from functools import reduce # reduce is no longer a builtin in Python 3
+
+class RenderOptions:
+
+ def __init__(self, app_options):
+ # should we render a cumulative CPU time chart
+ self.cumulative = True
+ self.charts = True
+ self.kernel_only = False
+ self.app_options = app_options
+
+ def proc_tree (self, trace):
+ if self.kernel_only:
+ return trace.kernel_tree
+ else:
+ return trace.proc_tree
+
+# Process tree background color.
+BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
+
+WHITE = (1.0, 1.0, 1.0, 1.0)
+# Process tree border color.
+BORDER_COLOR = (0.63, 0.63, 0.63, 1.0)
+# Second tick line color.
+TICK_COLOR = (0.92, 0.92, 0.92, 1.0)
+# 5-second tick line color.
+TICK_COLOR_BOLD = (0.86, 0.86, 0.86, 1.0)
+# Annotation colour
+ANNOTATION_COLOR = (0.63, 0.0, 0.0, 0.5)
+# Text color.
+TEXT_COLOR = (0.0, 0.0, 0.0, 1.0)
+
+# Font family
+FONT_NAME = "Bitstream Vera Sans"
+# Title text font.
+TITLE_FONT_SIZE = 18
+# Default text font.
+TEXT_FONT_SIZE = 12
+# Axis label font.
+AXIS_FONT_SIZE = 11
+# Legend font.
+LEGEND_FONT_SIZE = 12
+
+# CPU load chart color.
+CPU_COLOR = (0.40, 0.55, 0.70, 1.0)
+# IO wait chart color.
+IO_COLOR = (0.76, 0.48, 0.48, 0.5)
+# Disk throughput color.
+DISK_TPUT_COLOR = (0.20, 0.71, 0.20, 1.0)
+# CPU load chart color.
+FILE_OPEN_COLOR = (0.20, 0.71, 0.71, 1.0)
+# Mem cached color
+MEM_CACHED_COLOR = CPU_COLOR
+# Mem used color
+MEM_USED_COLOR = IO_COLOR
+# Buffers color
+MEM_BUFFERS_COLOR = (0.4, 0.4, 0.4, 0.3)
+# Swap color
+MEM_SWAP_COLOR = DISK_TPUT_COLOR
+
+# Process border color.
+PROC_BORDER_COLOR = (0.71, 0.71, 0.71, 1.0)
+# Waiting process color.
+PROC_COLOR_D = (0.76, 0.48, 0.48, 0.5)
+# Running process color.
+PROC_COLOR_R = CPU_COLOR
+# Sleeping process color.
+PROC_COLOR_S = (0.94, 0.94, 0.94, 1.0)
+# Stopped process color.
+PROC_COLOR_T = (0.94, 0.50, 0.50, 1.0)
+# Zombie process color.
+PROC_COLOR_Z = (0.71, 0.71, 0.71, 1.0)
+# Dead process color.
+PROC_COLOR_X = (0.71, 0.71, 0.71, 0.125)
+# Paging process color.
+PROC_COLOR_W = (0.71, 0.71, 0.71, 0.125)
+
+# Process label color.
+PROC_TEXT_COLOR = (0.19, 0.19, 0.19, 1.0)
+# Process label font.
+PROC_TEXT_FONT_SIZE = 12
+
+# Signature color.
+SIG_COLOR = (0.0, 0.0, 0.0, 0.3125)
+# Signature font.
+SIG_FONT_SIZE = 14
+# Signature text.
+SIGNATURE = "http://github.com/mmeeks/bootchart"
+
+# Process dependency line color.
+DEP_COLOR = (0.75, 0.75, 0.75, 1.0)
+# Process dependency line stroke.
+DEP_STROKE = 1.0
+
+# Process description date format.
+DESC_TIME_FORMAT = "mm:ss.SSS"
+
+# Cumulative coloring bits
+HSV_MAX_MOD = 31
+HSV_STEP = 7
+
+# Configure task color
+TASK_COLOR_CONFIGURE = (1.0, 1.0, 0.00, 1.0)
+# Compile task color.
+TASK_COLOR_COMPILE = (0.0, 1.00, 0.00, 1.0)
+# Install task color
+TASK_COLOR_INSTALL = (1.0, 0.00, 1.00, 1.0)
+# Sysroot task color
+TASK_COLOR_SYSROOT = (0.0, 0.00, 1.00, 1.0)
+# Package task color
+TASK_COLOR_PACKAGE = (0.0, 1.00, 1.00, 1.0)
+# Package Write RPM/DEB/IPK task color
+TASK_COLOR_PACKAGE_WRITE = (0.0, 0.50, 0.50, 1.0)
+
+# Distinct colors used for different disk volumes.
+# If we have more volumes, colors get re-used.
+VOLUME_COLORS = [
+ (1.0, 1.0, 0.00, 1.0),
+ (0.0, 1.00, 0.00, 1.0),
+ (1.0, 0.00, 1.00, 1.0),
+ (0.0, 0.00, 1.00, 1.0),
+ (0.0, 1.00, 1.00, 1.0),
+]
+
+# Process states
+STATE_UNDEFINED = 0
+STATE_RUNNING = 1
+STATE_SLEEPING = 2
+STATE_WAITING = 3
+STATE_STOPPED = 4
+STATE_ZOMBIE = 5
+
+STATE_COLORS = [(0, 0, 0, 0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, \
+ PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
+
+# CumulativeStats Types
+STAT_TYPE_CPU = 0
+STAT_TYPE_IO = 1
+
+# Convert ps process state to an int
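+# (unknown state flags map to STATE_UNDEFINED, since str.find returns -1)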
+def get_proc_state(flag):
+ return "RSDTZXW".find(flag) + 1
+
+def draw_text(ctx, text, color, x, y):
+ ctx.set_source_rgba(*color)
+ ctx.move_to(x, y)
+ ctx.show_text(text)
+
+def draw_fill_rect(ctx, color, rect):
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.fill()
+
+def draw_rect(ctx, color, rect):
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.stroke()
+
+def draw_legend_box(ctx, label, fill_color, x, y, s):
+ draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+
+def draw_legend_line(ctx, label, fill_color, x, y, s):
+ draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
+ ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
+ ctx.fill()
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+
+def draw_label_in_box(ctx, color, label, x, y, w, maxx):
+ label_w = ctx.text_extents(label)[2]
+ label_x = x + w / 2 - label_w / 2
+ if label_w + 10 > w:
+ label_x = x + w + 5
+ if label_x + label_w > maxx:
+ label_x = x - label_w - 5
+ draw_text(ctx, label, color, label_x, y)
+
+def draw_sec_labels(ctx, options, rect, sec_w, nsecs):
+ ctx.set_font_size(AXIS_FONT_SIZE)
+ prev_x = 0
+ for i in range(0, rect[2] + 1, sec_w):
+ if ((i / sec_w) % nsecs == 0) :
+ if options.app_options.as_minutes :
+ label = "%.1f" % (i / sec_w / 60.0)
+ else :
+ label = "%d" % (i / sec_w)
+ label_w = ctx.text_extents(label)[2]
+ x = rect[0] + i - label_w/2
+ if x >= prev_x:
+ draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
+ prev_x = x + label_w
+
+def draw_box_ticks(ctx, rect, sec_w):
+ draw_rect(ctx, BORDER_COLOR, tuple(rect))
+
+ ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
+
+ for i in range(sec_w, rect[2] + 1, sec_w):
+ if ((i / sec_w) % 10 == 0) :
+ ctx.set_line_width(1.5)
+ elif sec_w < 5 :
+ continue
+ else :
+ ctx.set_line_width(1.0)
+ if ((i / sec_w) % 30 == 0) :
+ ctx.set_source_rgba(*TICK_COLOR_BOLD)
+ else :
+ ctx.set_source_rgba(*TICK_COLOR)
+ ctx.move_to(rect[0] + i, rect[1] + 1)
+ ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
+ ctx.stroke()
+ ctx.set_line_width(1.0)
+
+ ctx.set_line_cap(cairo.LINE_CAP_BUTT)
+
+def draw_annotations(ctx, proc_tree, times, rect):
+ ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
+ ctx.set_source_rgba(*ANNOTATION_COLOR)
+ ctx.set_dash([4, 4])
+
+ for time in times:
+ if time is not None:
+ x = ((time - proc_tree.start_time) * rect[2] / proc_tree.duration)
+
+ ctx.move_to(rect[0] + x, rect[1] + 1)
+ ctx.line_to(rect[0] + x, rect[1] + rect[3] - 1)
+ ctx.stroke()
+
+ ctx.set_line_cap(cairo.LINE_CAP_BUTT)
+ ctx.set_dash([])
+
+def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
+ ctx.set_line_width(0.5)
+ x_shift = proc_tree.start_time
+
+ def transform_point_coords(point, x_base, y_base, \
+ xscale, yscale, x_trans, y_trans):
+ x = (point[0] - x_base) * xscale + x_trans
+ y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
+ return x, y
+
+ max_x = max (x for (x, y) in data)
+ max_y = max (y for (x, y) in data)
+ # avoid divide by zero
+ if max_y == 0:
+ max_y = 1.0
+ xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ # If data_range is given, scale the chart so that the value range in
+ # data_range matches the chart bounds exactly.
+ # Otherwise, scale so that the actual data matches the chart bounds.
+ if data_range:
+ yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
+ ybase = data_range[0]
+ else:
+ yscale = float(chart_bounds[3]) / max_y
+ ybase = 0
+
+ first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+
+ ctx.set_source_rgba(*color)
+ ctx.move_to(*first)
+ for point in data:
+ x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ ctx.line_to(x, y)
+ if fill:
+ ctx.stroke_preserve()
+ ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], first[1])
+ ctx.fill()
+ else:
+ ctx.stroke()
+ ctx.set_line_width(1.0)
+
+bar_h = 55
+meminfo_bar_h = 2 * bar_h
+header_h = 60
+# offsets
+off_x, off_y = 220, 10
+sec_w_base = 1 # the width of a second
+proc_h = 16 # the height of a process
+leg_s = 10
+MIN_IMG_W = 800
+CUML_HEIGHT = 2000 # Increased value to accommodate CPU and I/O graphs
+OPTIONS = None
+
+def extents(options, xscale, trace):
+ start = min(trace.start.keys())
+ end = start
+
+ processes = 0
+ for proc in trace.processes:
+ if not options.app_options.show_all and \
+ trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
+ continue
+
+ if trace.processes[proc][1] > end:
+ end = trace.processes[proc][1]
+ processes += 1
+
+ if trace.min is not None and trace.max is not None:
+ start = trace.min
+ end = trace.max
+
+ w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
+ h = proc_h * processes + header_h + 2 * off_y
+
+ if options.charts:
+ if trace.cpu_stats:
+ h += 30 + bar_h
+ if trace.disk_stats:
+ h += 30 + bar_h
+ if trace.monitor_disk:
+ h += 30 + bar_h
+ if trace.mem_stats:
+ h += meminfo_bar_h
+
+ return (w, h)
+
+def clip_visible(clip, rect):
+ xmax = max (clip[0], rect[0])
+ ymax = max (clip[1], rect[1])
+ xmin = min (clip[0] + clip[2], rect[0] + rect[2])
+ ymin = min (clip[1] + clip[3], rect[1] + rect[3])
+ return (xmin > xmax and ymin > ymax)
+
+def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
+ proc_tree = options.proc_tree(trace)
+
+ # render bar legend
+ if trace.cpu_stats:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+
+ draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O wait
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
+ proc_tree, None)
+ # render CPU load
+ draw_chart (ctx, CPU_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render second chart
+ if trace.disk_stats:
+ draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O utilization
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.util) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ # render disk throughput
+ max_sample = max (trace.disk_stats, key = lambda s: s.tput)
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
+ [(sample.time, sample.tput) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+ label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
+ draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render disk space usage
+ #
+ # Draws the amount of disk space used on each volume relative to the
+ # lowest recorded amount. The graphs for each volume are stacked above
+ # each other so that total disk usage is visible.
+ if trace.monitor_disk:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+ # Determine set of volumes for which we have
+ # information and the minimal amount of used disk
+ # space for each. Currently samples are allowed to
+ # not have values for all volumes; drawing could be
+ # made more efficient if that wasn't the case.
+ volumes = set()
+ min_used = {}
+ for sample in trace.monitor_disk:
+ for volume, used in sample.records.items():
+ volumes.add(volume)
+ if volume not in min_used or min_used[volume] > used:
+ min_used[volume] = used
+ volumes = sorted(list(volumes))
+ disk_scale = 0
+ for i, volume in enumerate(volumes):
+ volume_scale = max([sample.records[volume] - min_used[volume]
+ for sample in trace.monitor_disk
+ if volume in sample.records])
+ # Does not take length of volume name into account, but fixed offset
+ # works okay in practice.
+ draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
+ VOLUME_COLORS[i % len(VOLUME_COLORS)],
+ off_x + i * 250, curr_y+20, leg_s)
+ disk_scale += volume_scale
+
+ # render used amount of disk space
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ for i in range(len(volumes), 0, -1):
+ draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
+ [(sample.time,
+ # Sum up used space of all volumes including the current one
+ # so that the graphs appear as stacked on top of each other.
+ reduce(lambda x,y: x+y,
+ [sample.records[volume] - min_used[volume]
+ for volume in volumes[0:i]
+ if volume in sample.records],
+ 0))
+ for sample in trace.monitor_disk], \
+ proc_tree, [0, disk_scale])
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render mem usage
+ chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
+ mem_stats = trace.mem_stats
+ if mem_stats and clip_visible (clip, chart_rect):
+ mem_scale = max(sample.buffers for sample in mem_stats)
+ draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
+ draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
+ MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_annotations(ctx, proc_tree, trace.times, chart_rect)
+ draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
+ [(sample.time, sample.buffers) for sample in trace.mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
+ [(sample.time, sample.used) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
+ [(sample.time, sample.cached) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
+ [(sample.time, float(sample.swap)) for sample in mem_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + meminfo_bar_h
+
+ return curr_y
+
+def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
+ chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - header_h - leg_s + proc_h]
+
+ draw_legend_box (ctx, "Configure", \
+ TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Compile", \
+ TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Install", \
+ TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Populate Sysroot", \
+ TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package", \
+ TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package Write",
+ TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
+
+ ctx.set_font_size(PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
+
+ y = curr_y+header_h
+
+ offset = trace.min or min(trace.start.keys())
+ for s in sorted(trace.start.keys()):
+ for val in sorted(trace.start[s]):
+ if not options.app_options.show_all and \
+ trace.processes[val][1] - s < options.app_options.mintime:
+ continue
+ task = val.split(":")[1]
+ #print val
+ #print trace.processes[val][1]
+ #print s
+ x = chart_rect[0] + (s - offset) * sec_w
+ w = ((trace.processes[val][1] - s) * sec_w)
+
+ #print "proc at %s %s %s %s" % (x, y, w, proc_h)
+ col = None
+ if task == "do_compile":
+ col = TASK_COLOR_COMPILE
+ elif task == "do_configure":
+ col = TASK_COLOR_CONFIGURE
+ elif task == "do_install":
+ col = TASK_COLOR_INSTALL
+ elif task == "do_populate_sysroot":
+ col = TASK_COLOR_SYSROOT
+ elif task == "do_package":
+ col = TASK_COLOR_PACKAGE
+ elif task == "do_package_write_rpm" or \
+ task == "do_package_write_deb" or \
+ task == "do_package_write_ipk":
+ col = TASK_COLOR_PACKAGE_WRITE
+ else:
+ col = WHITE
+
+ if col:
+ draw_fill_rect(ctx, col, (x, y, w, proc_h))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, val, x, y + proc_h - 4, w, proc_h)
+ y = y + proc_h
+
+ return curr_y
+
+#
+# Render the chart.
+#
+def render(ctx, options, xscale, trace):
+ (w, h) = extents (options, xscale, trace)
+ global OPTIONS
+ OPTIONS = options.app_options
+
+ # x, y, w, h
+ clip = ctx.clip_extents()
+
+ sec_w = int (xscale * sec_w_base)
+ ctx.set_line_width(1.0)
+ ctx.select_font_face(FONT_NAME)
+ draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
+ w -= 2*off_x
+ curr_y = off_y
+
+ if options.charts:
+ curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
+
+ curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
+
+ return
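+ # NOTE: the return above makes the remainder of this function unreachable;
+ # the code below is the original pybootchartgui boot-chart rendering.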
+
+ proc_tree = options.proc_tree (trace)
+
+ # draw the title and headers
+ if proc_tree.idle:
+ duration = proc_tree.idle
+ else:
+ duration = proc_tree.duration
+
+ if not options.kernel_only:
+ curr_y = draw_header (ctx, trace.headers, duration)
+ else:
+ curr_y = off_y
+
+ # draw process boxes
+ proc_height = h
+ if proc_tree.taskstats and options.cumulative:
+ proc_height -= CUML_HEIGHT
+
+ draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
+ curr_y, w, proc_height, sec_w)
+
+ curr_y = proc_height
+ ctx.set_font_size(SIG_FONT_SIZE)
+ draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
+
+ # draw a cumulative CPU-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
+
+ # draw a cumulative I/O-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
+
+def draw_process_bar_chart(ctx, clip, options, proc_tree, times, curr_y, w, h, sec_w):
+ header_size = 0
+ if not options.kernel_only:
+ draw_legend_box (ctx, "Running (%cpu)",
+ PROC_COLOR_R, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Unint.sleep (I/O)",
+ PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Sleeping",
+ PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Zombie",
+ PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
+ header_size = 45
+
+ chart_rect = [off_x, curr_y + header_size + 15,
+ w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
+ ctx.set_font_size (PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ if sec_w > 100:
+ nsec = 1
+ else:
+ nsec = 5
+ draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
+ draw_annotations (ctx, proc_tree, times, chart_rect)
+
+ y = curr_y + 60
+ for root in proc_tree.process_tree:
+ draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
+ y = y + proc_h * proc_tree.num_nodes([root])
+
+
+def draw_header (ctx, headers, duration):
+ toshow = [
+ ('system.uname', 'uname', lambda s: s),
+ ('system.release', 'release', lambda s: s),
+ ('system.cpu', 'CPU', lambda s: re.sub(r'model name\s*:\s*', '', s, 1)),
+ ('system.kernel.options', 'kernel options', lambda s: s),
+ ]
+
+ header_y = ctx.font_extents()[2] + 10
+ ctx.set_font_size(TITLE_FONT_SIZE)
+ draw_text(ctx, headers['title'], TEXT_COLOR, off_x, header_y)
+ ctx.set_font_size(TEXT_FONT_SIZE)
+
+ for (headerkey, headertitle, mangle) in toshow:
+ header_y += ctx.font_extents()[2]
+ if headerkey in headers:
+ value = headers.get(headerkey)
+ else:
+ value = ""
+ txt = headertitle + ': ' + mangle(value)
+ draw_text(ctx, txt, TEXT_COLOR, off_x, header_y)
+
+ dur = duration / 100.0
+ txt = 'time : %02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60))
+ if headers.get('system.maxpid') is not None:
+ txt = txt + ' max pid: %s' % (headers.get('system.maxpid'))
+
+ header_y += ctx.font_extents()[2]
+ draw_text (ctx, txt, TEXT_COLOR, off_x, header_y)
+
+ return header_y
+
+def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
+ x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
+ w = ((proc.duration) * rect[2] / proc_tree.duration)
+
+ draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ ipid = int(proc.pid)
+ if not OPTIONS.show_all:
+ cmdString = proc.cmd
+ else:
+ cmdString = ''
+ if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
+ cmdString = cmdString + " [" + str(ipid // 1000) + "]"
+ if OPTIONS.show_all:
+ if proc.args:
+ cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
+ else:
+ cmdString = cmdString + " " + proc.exe
+
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
+
+ next_y = y + proc_h
+ for child in proc.child_list:
+ if next_y > clip[1] + clip[3]:
+ break
+ child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
+ draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
+ next_y = next_y + proc_h * proc_tree.num_nodes([child])
+
+ return x, y
+
+
+def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip):
+
+ if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
+ return
+
+ draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
+
+ last_tx = -1
+ for sample in proc.samples :
+ tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
+
+ # samples are sorted chronologically
+ if tx < clip[0]:
+ continue
+ if tx > clip[0] + clip[2]:
+ break
+
+ tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
+ if last_tx != -1 and abs(last_tx - tx) <= tw:
+ tw -= last_tx - tx
+ tx = last_tx
+ tw = max (tw, 1) # nice to see at least something
+
+ last_tx = tx + tw
+ state = get_proc_state( sample.state )
+
+ color = STATE_COLORS[state]
+ if state == STATE_RUNNING:
+ alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
+ color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
+# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
+ elif state == STATE_SLEEPING:
+ continue
+
+ draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
+
+def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
+ ctx.set_source_rgba(*DEP_COLOR)
+ ctx.set_dash([2, 2])
+ if abs(px - x) < 3:
+ dep_off_x = 3
+ dep_off_y = proc_h / 4
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, py - dep_off_y)
+ ctx.line_to(px, py - dep_off_y)
+ else:
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px, y + proc_h / 2)
+ ctx.line_to(px, py)
+ ctx.stroke()
+ ctx.set_dash([])
+
+# elide the bootchart collector - it is quite distorting
+def elide_bootchart(proc):
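+ # ('bootchart-colle' is the collector's name truncated to the kernel's
+ # 15-character comm limit)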
+ return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
+
+class CumlSample:
+ def __init__(self, proc):
+ self.cmd = proc.cmd
+ self.samples = []
+ self.merge_samples (proc)
+ self.color = None
+
+ def merge_samples(self, proc):
+ self.samples.extend (proc.samples)
+ self.samples.sort (key = lambda p: p.time)
+
+ def next(self):
+ global palette_idx
+ palette_idx += HSV_STEP
+ return palette_idx
+
+ def get_color(self):
+ if self.color is None:
+ i = self.next() % HSV_MAX_MOD
+ h = 0.0
+ if i != 0:
+ h = (1.0 * i) / HSV_MAX_MOD
+ s = 0.5
+ v = 1.0
+ c = colorsys.hsv_to_rgb (h, s, v)
+ self.color = (c[0], c[1], c[2], 1.0)
+ return self.color
+
+
+def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
+ global palette_idx
+ palette_idx = 0
+
+ time_hash = {}
+ total_time = 0.0
+ m_proc_list = {}
+
+ if stat_type == STAT_TYPE_CPU:
+ sample_value = 'cpu'
+ else:
+ sample_value = 'io'
+ for proc in proc_tree.process_list:
+ if elide_bootchart(proc):
+ continue
+
+ for sample in proc.samples:
+ total_time += getattr(sample.cpu_sample, sample_value)
+ if sample.time not in time_hash:
+ time_hash[sample.time] = 1
+
+ # merge pids with the same cmd
+ if proc.cmd not in m_proc_list:
+ m_proc_list[proc.cmd] = CumlSample (proc)
+ continue
+ s = m_proc_list[proc.cmd]
+ s.merge_samples (proc)
+
+ # all the sample times
+ times = sorted(time_hash)
+ if len (times) < 2:
+ print("degenerate boot chart")
+ return
+
+ pix_per_ns = chart_bounds[3] / total_time
+# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
+
+ # FIXME: we have duplicates in the process list too [!] - why !?
+
+ # Render bottom up, left to right
+ below = {}
+ for time in times:
+ below[time] = chart_bounds[1] + chart_bounds[3]
+
+ # same colors each time we render
+ random.seed (0)
+
+ ctx.set_line_width(1)
+
+ legends = []
+ labels = []
+
+ # render each pid in order
+ for cs in m_proc_list.values():
+ row = {}
+ cuml = 0.0
+
+ # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
+ for sample in cs.samples:
+ cuml += getattr(sample.cpu_sample, sample_value)
+ row[sample.time] = cuml
+
+ process_total_time = cuml
+
+ # hide really tiny processes
+ if cuml * pix_per_ns <= 2:
+ continue
+
+ last_time = times[0]
+ y = last_below = below[last_time]
+ last_cuml = cuml = 0.0
+
+ ctx.set_source_rgba(*cs.get_color())
+ for time in times:
+ render_seg = False
+
+ # did the underlying trend increase ?
+ if below[time] != last_below:
+ last_below = below[last_time]
+ last_cuml = cuml
+ render_seg = True
+
+            # did we move up by at least a pixel ?
+ if time in row:
+ nc = round (row[time] * pix_per_ns)
+ if nc != cuml:
+ last_cuml = cuml
+ cuml = nc
+ render_seg = True
+
+# if last_cuml > cuml:
+# assert fail ... - un-sorted process samples
+
+ # draw the trailing rectangle from the last time to
+ # before now, at the height of the last segment.
+ if render_seg:
+ w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
+ ctx.fill()
+# ctx.stroke()
+ last_time = time
+ y = below [time] - cuml
+
+ row[time] = y
+
+ # render the last segment
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ y = below[last_time] - cuml
+ ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
+ ctx.fill()
+# ctx.stroke()
+
+ # render legend if it will fit
+ if cuml > 8:
+ label = cs.cmd
+ extnts = ctx.text_extents(label)
+ label_w = extnts[2]
+ label_h = extnts[3]
+# print "Text extents %g by %g" % (label_w, label_h)
+ labels.append((label,
+ chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
+ y + (cuml + label_h) / 2))
+ if cs in legends:
+ print("ARGH - duplicate process in list !")
+
+ legends.append ((cs, process_total_time))
+
+ below = row
+
+ # render grid-lines over the top
+ draw_box_ticks(ctx, chart_bounds, sec_w)
+
+ # render labels
+ for l in labels:
+ draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
+
+ # Render legends
+ font_height = 20
+ label_width = 300
+ LEGENDS_PER_COL = 15
+ LEGENDS_TOTAL = 45
+ ctx.set_font_size (TITLE_FONT_SIZE)
+ dur_secs = duration / 100
+ cpu_secs = total_time / 1000000000
+
+ # misleading - with multiple CPUs ...
+# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
+    if stat_type == STAT_TYPE_CPU:
+ label = "Cumulative CPU usage, by process; total CPU: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+ else:
+ label = "Cumulative I/O usage, by process; total I/O: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+
+ draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
+ chart_bounds[1] + font_height)
+
+ i = 0
+ legends = sorted(legends, key=itemgetter(1), reverse=True)
+ ctx.set_font_size(TEXT_FONT_SIZE)
+ for t in legends:
+ cs = t[0]
+ time = t[1]
+ x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
+ y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
+        legend_text = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
+        draw_legend_box(ctx, legend_text, cs.color, x, y, leg_s)
+ i = i + 1
+ if i >= LEGENDS_TOTAL:
+ break
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/gui.py b/poky/scripts/pybootchartgui/pybootchartgui/gui.py
new file mode 100644
index 000000000..7fedd232d
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/gui.py
@@ -0,0 +1,350 @@
+# This file is part of pybootchartgui.
+
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+import gobject
+import gtk
+import gtk.gdk
+import gtk.keysyms
+from . import draw
+from .draw import RenderOptions
+
+class PyBootchartWidget(gtk.DrawingArea):
+ __gsignals__ = {
+ 'expose-event': 'override',
+ 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
+ 'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
+ 'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
+ }
+
+ def __init__(self, trace, options, xscale):
+ gtk.DrawingArea.__init__(self)
+
+ self.trace = trace
+ self.options = options
+
+ self.set_flags(gtk.CAN_FOCUS)
+
+ self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.connect("button-press-event", self.on_area_button_press)
+ self.connect("button-release-event", self.on_area_button_release)
+ self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.connect("motion-notify-event", self.on_area_motion_notify)
+ self.connect("scroll-event", self.on_area_scroll_event)
+ self.connect('key-press-event', self.on_key_press_event)
+
+ self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
+ self.connect("size-allocate", self.on_allocation_size_changed)
+ self.connect("position-changed", self.on_position_changed)
+
+ self.zoom_ratio = 1.0
+ self.xscale = xscale
+ self.x, self.y = 0.0, 0.0
+
+ self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
+ self.hadj = None
+ self.vadj = None
+ self.hadj_changed_signal_id = None
+ self.vadj_changed_signal_id = None
+
+ def do_expose_event(self, event):
+ cr = self.window.cairo_create()
+
+ # set a clip region for the expose event
+ cr.rectangle(
+ event.area.x, event.area.y,
+ event.area.width, event.area.height
+ )
+ cr.clip()
+ self.draw(cr, self.get_allocation())
+ return False
+
+ def draw(self, cr, rect):
+ cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
+ cr.paint()
+ cr.scale(self.zoom_ratio, self.zoom_ratio)
+ cr.translate(-self.x, -self.y)
+ draw.render(cr, self.options, self.xscale, self.trace)
+
+ def position_changed(self):
+ self.emit("position-changed", self.x, self.y)
+
+ ZOOM_INCREMENT = 1.25
+
+ def zoom_image (self, zoom_ratio):
+ self.zoom_ratio = zoom_ratio
+ self._set_scroll_adjustments (self.hadj, self.vadj)
+ self.queue_draw()
+
+ def zoom_to_rect (self, rect):
+ zoom_ratio = float(rect.width)/float(self.chart_width)
+ self.zoom_image(zoom_ratio)
+ self.x = 0
+ self.position_changed()
+
+ def set_xscale(self, xscale):
+ old_mid_x = self.x + self.hadj.page_size / 2
+ self.xscale = xscale
+ self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
+        new_x = old_mid_x    # computed but currently unused; zoom keeps the previous origin
+ self.zoom_image (self.zoom_ratio)
+
+ def on_expand(self, action):
+ self.set_xscale (int(self.xscale * 1.5 + 0.5))
+
+ def on_contract(self, action):
+ self.set_xscale (max(int(self.xscale / 1.5), 1))
+
+ def on_zoom_in(self, action):
+ self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
+
+ def on_zoom_out(self, action):
+ self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
+
+ def on_zoom_fit(self, action):
+ self.zoom_to_rect(self.get_allocation())
+
+ def on_zoom_100(self, action):
+ self.zoom_image(1.0)
+ self.set_xscale(1.0)
+
+ def show_toggled(self, button):
+ self.options.app_options.show_all = button.get_property ('active')
+ self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
+ self._set_scroll_adjustments(self.hadj, self.vadj)
+ self.queue_draw()
+
+ POS_INCREMENT = 100
+
+ def on_key_press_event(self, widget, event):
+ if event.keyval == gtk.keysyms.Left:
+ self.x -= self.POS_INCREMENT/self.zoom_ratio
+ elif event.keyval == gtk.keysyms.Right:
+ self.x += self.POS_INCREMENT/self.zoom_ratio
+ elif event.keyval == gtk.keysyms.Up:
+ self.y -= self.POS_INCREMENT/self.zoom_ratio
+ elif event.keyval == gtk.keysyms.Down:
+ self.y += self.POS_INCREMENT/self.zoom_ratio
+ else:
+ return False
+ self.queue_draw()
+ self.position_changed()
+ return True
+
+ def on_area_button_press(self, area, event):
+ if event.button == 2 or event.button == 1:
+ area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
+ self.prevmousex = event.x
+ self.prevmousey = event.y
+        # never claim the event; the branch above only records drag state
+        return False
+
+ def on_area_button_release(self, area, event):
+ if event.button == 2 or event.button == 1:
+ area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
+ self.prevmousex = None
+ self.prevmousey = None
+ return True
+ return False
+
+ def on_area_scroll_event(self, area, event):
+ if event.state & gtk.gdk.CONTROL_MASK:
+ if event.direction == gtk.gdk.SCROLL_UP:
+ self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
+ return True
+ if event.direction == gtk.gdk.SCROLL_DOWN:
+ self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
+ return True
+ return False
+
+ def on_area_motion_notify(self, area, event):
+ state = event.state
+ if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
+ x, y = int(event.x), int(event.y)
+ # pan the image
+ self.x += (self.prevmousex - x)/self.zoom_ratio
+ self.y += (self.prevmousey - y)/self.zoom_ratio
+ self.queue_draw()
+ self.prevmousex = x
+ self.prevmousey = y
+ self.position_changed()
+ return True
+
+ def on_set_scroll_adjustments(self, area, hadj, vadj):
+ self._set_scroll_adjustments (hadj, vadj)
+
+ def on_allocation_size_changed(self, widget, allocation):
+ self.hadj.page_size = allocation.width
+ self.hadj.page_increment = allocation.width * 0.9
+ self.vadj.page_size = allocation.height
+ self.vadj.page_increment = allocation.height * 0.9
+
+ def _set_adj_upper(self, adj, upper):
+ changed = False
+ value_changed = False
+
+ if adj.upper != upper:
+ adj.upper = upper
+ changed = True
+
+ max_value = max(0.0, upper - adj.page_size)
+ if adj.value > max_value:
+ adj.value = max_value
+ value_changed = True
+
+ if changed:
+ adj.changed()
+ if value_changed:
+ adj.value_changed()
+
+ def _set_scroll_adjustments(self, hadj, vadj):
+ if hadj == None:
+ hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ if vadj == None:
+ vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+
+ if self.hadj_changed_signal_id != None and \
+ self.hadj != None and hadj != self.hadj:
+ self.hadj.disconnect (self.hadj_changed_signal_id)
+ if self.vadj_changed_signal_id != None and \
+ self.vadj != None and vadj != self.vadj:
+ self.vadj.disconnect (self.vadj_changed_signal_id)
+
+ if hadj != None:
+ self.hadj = hadj
+ self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
+ self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
+
+ if vadj != None:
+ self.vadj = vadj
+ self._set_adj_upper (self.vadj, self.zoom_ratio * self.chart_height)
+ self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
+
+ def on_adjustments_changed(self, adj):
+ self.x = self.hadj.value / self.zoom_ratio
+ self.y = self.vadj.value / self.zoom_ratio
+ self.queue_draw()
+
+ def on_position_changed(self, widget, x, y):
+ self.hadj.value = x * self.zoom_ratio
+ self.vadj.value = y * self.zoom_ratio
+
+PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
+
+class PyBootchartShell(gtk.VBox):
+ ui = '''
+ <ui>
+ <toolbar name="ToolBar">
+ <toolitem action="Expand"/>
+ <toolitem action="Contract"/>
+ <separator/>
+ <toolitem action="ZoomIn"/>
+ <toolitem action="ZoomOut"/>
+ <toolitem action="ZoomFit"/>
+ <toolitem action="Zoom100"/>
+ </toolbar>
+ </ui>
+ '''
+ def __init__(self, window, trace, options, xscale):
+ gtk.VBox.__init__(self)
+
+ self.widget = PyBootchartWidget(trace, options, xscale)
+
+ # Create a UIManager instance
+ uimanager = self.uimanager = gtk.UIManager()
+
+ # Add the accelerator group to the toplevel window
+ accelgroup = uimanager.get_accel_group()
+ window.add_accel_group(accelgroup)
+
+ # Create an ActionGroup
+ actiongroup = gtk.ActionGroup('Actions')
+ self.actiongroup = actiongroup
+
+ # Create actions
+ actiongroup.add_actions((
+ ('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
+ ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
+ ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
+ ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
+ ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
+ ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
+ ))
+
+ # Add the actiongroup to the uimanager
+ uimanager.insert_action_group(actiongroup, 0)
+
+ # Add a UI description
+ uimanager.add_ui_from_string(self.ui)
+
+ # Scrolled window
+ scrolled = gtk.ScrolledWindow()
+ scrolled.add(self.widget)
+
+ # toolbar / h-box
+ hbox = gtk.HBox(False, 8)
+
+ # Create a Toolbar
+ toolbar = uimanager.get_widget('/ToolBar')
+ hbox.pack_start(toolbar, True, True)
+
+ if not options.kernel_only:
+ # Misc. options
+ button = gtk.CheckButton("Show more")
+ button.connect ('toggled', self.widget.show_toggled)
+ button.set_active(options.app_options.show_all)
+ hbox.pack_start (button, False, True)
+
+ self.pack_start(hbox, False)
+ self.pack_start(scrolled)
+ self.show_all()
+
+ def grab_focus(self, window):
+ window.set_focus(self.widget)
+
+
+class PyBootchartWindow(gtk.Window):
+
+ def __init__(self, trace, app_options):
+ gtk.Window.__init__(self)
+
+ window = self
+ window.set_title("Bootchart %s" % trace.filename)
+ window.set_default_size(750, 550)
+
+ tab_page = gtk.Notebook()
+ tab_page.show()
+ window.add(tab_page)
+
+ full_opts = RenderOptions(app_options)
+ full_tree = PyBootchartShell(window, trace, full_opts, 1.0)
+ tab_page.append_page (full_tree, gtk.Label("Full tree"))
+
+ if trace.kernel is not None and len (trace.kernel) > 2:
+ kernel_opts = RenderOptions(app_options)
+ kernel_opts.cumulative = False
+ kernel_opts.charts = False
+ kernel_opts.kernel_only = True
+ kernel_tree = PyBootchartShell(window, trace, kernel_opts, 5.0)
+ tab_page.append_page (kernel_tree, gtk.Label("Kernel boot"))
+
+ full_tree.grab_focus(self)
+ self.show()
+
+
+def show(trace, options):
+ win = PyBootchartWindow(trace, options)
+ win.connect('destroy', gtk.main_quit)
+ gtk.main()
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/main.py b/poky/scripts/pybootchartgui/pybootchartgui/main.py
new file mode 120000
index 000000000..b45ae0a3d
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/main.py
@@ -0,0 +1 @@
+main.py.in \ No newline at end of file
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/main.py.in b/poky/scripts/pybootchartgui/pybootchartgui/main.py.in
new file mode 100644
index 000000000..a954b125d
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/main.py.in
@@ -0,0 +1,183 @@
+#
+# ***********************************************************************
+# Warning: This file is auto-generated from main.py.in - edit it there.
+# ***********************************************************************
+#
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import os
+import optparse
+
+from . import parsing
+from . import batch
+
+def _mk_options_parser():
+ """Make an options parser."""
+ usage = "%prog [options] /path/to/tmp/buildstats/<recipe-machine>/<BUILDNAME>/"
+ version = "%prog v1.0.0"
+ parser = optparse.OptionParser(usage, version=version)
+ parser.add_option("-i", "--interactive", action="store_true", dest="interactive", default=False,
+ help="start in active mode")
+ parser.add_option("-f", "--format", dest="format", default="png", choices=["png", "svg", "pdf"],
+ help="image format (png, svg, pdf); default format png")
+ parser.add_option("-o", "--output", dest="output", metavar="PATH", default=None,
+ help="output path (file or directory) where charts are stored")
+ parser.add_option("-s", "--split", dest="num", type=int, default=1,
+ help="split the output chart into <NUM> charts, only works with \"-o PATH\"")
+ parser.add_option("-m", "--mintime", dest="mintime", type=int, default=8,
+ help="only tasks longer than this time will be displayed")
+ parser.add_option("-M", "--minutes", action="store_true", dest="as_minutes", default=False,
+ help="display time in minutes instead of seconds")
+# parser.add_option("-n", "--no-prune", action="store_false", dest="prune", default=True,
+# help="do not prune the process tree")
+ parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
+ help="suppress informational messages")
+# parser.add_option("-t", "--boot-time", action="store_true", dest="boottime", default=False,
+# help="only display the boot time of the boot in text format (stdout)")
+ parser.add_option("--very-quiet", action="store_true", dest="veryquiet", default=False,
+ help="suppress all messages except errors")
+ parser.add_option("--verbose", action="store_true", dest="verbose", default=False,
+ help="print all messages")
+# parser.add_option("--profile", action="store_true", dest="profile", default=False,
+# help="profile rendering of chart (only useful when in batch mode indicated by -f)")
+# parser.add_option("--show-pid", action="store_true", dest="show_pid", default=False,
+# help="show process ids in the bootchart as 'processname [pid]'")
+ parser.add_option("--show-all", action="store_true", dest="show_all", default=False,
+ help="show all processes in the chart")
+# parser.add_option("--crop-after", dest="crop_after", metavar="PROCESS", default=None,
+# help="crop chart when idle after PROCESS is started")
+# parser.add_option("--annotate", action="append", dest="annotate", metavar="PROCESS", default=None,
+# help="annotate position where PROCESS is started; can be specified multiple times. " +
+# "To create a single annotation when any one of a set of processes is started, use commas to separate the names")
+# parser.add_option("--annotate-file", dest="annotate_file", metavar="FILENAME", default=None,
+# help="filename to write annotation points to")
+ parser.add_option("-T", "--full-time", action="store_true", dest="full_time", default=False,
+ help="display the full time regardless of which processes are currently shown")
+ return parser
+
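+# A typical batch invocation (output path hypothetical, mirroring the usage
+# string above):
+#   pybootchartgui.py -f svg -o /tmp/charts tmp/buildstats/<recipe-machine>/<BUILDNAME>/
+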
+class Writer:
+ def __init__(self, write, options):
+ self.write = write
+ self.options = options
+
+ def error(self, msg):
+ self.write(msg)
+
+ def warn(self, msg):
+ if not self.options.quiet:
+ self.write(msg)
+
+ def info(self, msg):
+ if self.options.verbose:
+ self.write(msg)
+
+ def status(self, msg):
+ if not self.options.quiet:
+ self.write(msg)
+
+def _mk_writer(options):
+ def write(s):
+ print(s)
+ return Writer(write, options)
+
+def _get_filename(path):
+ """Construct a usable filename for outputs"""
+ dname = "."
+ fname = "bootchart"
+ if path != None:
+ if os.path.isdir(path):
+ dname = path
+ else:
+ fname = path
+ return os.path.join(dname, fname)
+
+def main(argv=None):
+ try:
+ if argv is None:
+ argv = sys.argv[1:]
+
+ parser = _mk_options_parser()
+ options, args = parser.parse_args(argv)
+
+ # Default values for disabled options
+ options.prune = True
+ options.boottime = False
+ options.profile = False
+ options.show_pid = False
+ options.crop_after = None
+ options.annotate = None
+ options.annotate_file = None
+
+ writer = _mk_writer(options)
+
+ if len(args) == 0:
+ print("No path given, trying /var/log/bootchart.tgz")
+ args = [ "/var/log/bootchart.tgz" ]
+
+ res = parsing.Trace(writer, args, options)
+
+ if options.interactive or options.output == None:
+ from . import gui
+ gui.show(res, options)
+ elif options.boottime:
+ import math
+ proc_tree = res.proc_tree
+ if proc_tree.idle:
+ duration = proc_tree.idle
+ else:
+ duration = proc_tree.duration
+ dur = duration / 100.0
+ print('%02d:%05.2f' % (math.floor(dur/60), dur - 60 * math.floor(dur/60)))
+ else:
+ if options.annotate_file:
+ f = open (options.annotate_file, "w")
+ try:
+                    for time in res.times:
+ if time is not None:
+ # output as ms
+                            f.write("%d\n" % (time * 10))
+ finally:
+ f.close()
+ filename = _get_filename(options.output)
+ res_list = parsing.split_res(res, options)
+ n = 1
+ width = len(str(len(res_list)))
+ s = "_%%0%dd." % width
+ for r in res_list:
+ if len(res_list) == 1:
+ f = filename + "." + options.format
+ else:
+ f = filename + s % n + options.format
+ n = n + 1
+ def render():
+ batch.render(writer, r, options, f)
+ if options.profile:
+ import cProfile
+ import pstats
+ profile = '%s.prof' % os.path.splitext(filename)[0]
+ cProfile.runctx('render()', globals(), locals(), profile)
+ p = pstats.Stats(profile)
+ p.strip_dirs().sort_stats('time').print_stats(20)
+ else:
+ render()
+
+ return 0
+ except parsing.ParseError as ex:
+ print(("Parse error: %s" % ex))
+ return 2
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/parsing.py b/poky/scripts/pybootchartgui/pybootchartgui/parsing.py
new file mode 100644
index 000000000..bcfb2da56
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -0,0 +1,821 @@
+# This file is part of pybootchartgui.
+
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import string
+import re
+import sys
+import tarfile
+from time import clock
+from collections import defaultdict
+from functools import reduce
+
+from .samples import *
+from .process_tree import ProcessTree
+
+if sys.version_info >= (3, 0):
+ long = int
+
+# Parsing produces as its end result a 'Trace'
+
+class Trace:
+ def __init__(self, writer, paths, options):
+ self.processes = {}
+ self.start = {}
+ self.end = {}
+ self.min = None
+ self.max = None
+ self.headers = None
+ self.disk_stats = []
+ self.ps_stats = None
+ self.taskstats = None
+ self.cpu_stats = []
+ self.cmdline = None
+ self.kernel = None
+ self.kernel_tree = None
+ self.filename = None
+ self.parent_map = None
+ self.mem_stats = []
+ self.monitor_disk = None
+ self.times = [] # Always empty, but expected by draw.py when drawing system charts.
+
+ if len(paths):
+ parse_paths (writer, self, paths)
+ if not self.valid():
+ raise ParseError("empty state: '%s' does not contain a valid bootchart" % ", ".join(paths))
+
+ if options.full_time:
+ self.min = min(self.start.keys())
+ self.max = max(self.end.keys())
+
+
+ # Rendering system charts depends on start and end
+ # time. Provide them where the original drawing code expects
+ # them, i.e. in proc_tree.
+ class BitbakeProcessTree:
+ def __init__(self, start_time, end_time):
+ self.start_time = start_time
+ self.end_time = end_time
+ self.duration = self.end_time - self.start_time
+ self.proc_tree = BitbakeProcessTree(min(self.start.keys()),
+ max(self.end.keys()))
+
+
+ return
+
+ # Turn that parsed information into something more useful
+ # link processes into a tree of pointers, calculate statistics
+ self.compile(writer)
+
+ # Crop the chart to the end of the first idle period after the given
+ # process
+ if options.crop_after:
+ idle = self.crop (writer, options.crop_after)
+ else:
+ idle = None
+
+ # Annotate other times as the first start point of given process lists
+ self.times = [ idle ]
+ if options.annotate:
+ for procnames in options.annotate:
+ names = [x[:15] for x in procnames.split(",")]
+ for proc in self.ps_stats.process_map.values():
+ if proc.cmd in names:
+ self.times.append(proc.start_time)
+ break
+ else:
+ self.times.append(None)
+
+ self.proc_tree = ProcessTree(writer, self.kernel, self.ps_stats,
+ self.ps_stats.sample_period,
+ self.headers.get("profile.process"),
+ options.prune, idle, self.taskstats,
+ self.parent_map is not None)
+
+ if self.kernel is not None:
+ self.kernel_tree = ProcessTree(writer, self.kernel, None, 0,
+ self.headers.get("profile.process"),
+ False, None, None, True)
+
+ def valid(self):
+ return len(self.processes) != 0
+        # unreachable in this bitbake variant; upstream validity check kept for reference:
+        # return self.headers != None and self.disk_stats != None and \
+        #     self.ps_stats != None and self.cpu_stats != None
+
+ def add_process(self, process, start, end):
+ self.processes[process] = [start, end]
+ if start not in self.start:
+ self.start[start] = []
+ if process not in self.start[start]:
+ self.start[start].append(process)
+ if end not in self.end:
+ self.end[end] = []
+ if process not in self.end[end]:
+ self.end[end].append(process)
+
+ def compile(self, writer):
+
+ def find_parent_id_for(pid):
+            if pid == 0:
+ return 0
+ ppid = self.parent_map.get(pid)
+ if ppid:
+ # many of these double forks are so short lived
+ # that we have no samples, or process info for them
+                # so climb the parent hierarchy to find one
+ if int (ppid * 1000) not in self.ps_stats.process_map:
+# print "Pid '%d' short lived with no process" % ppid
+ ppid = find_parent_id_for (ppid)
+# else:
+# print "Pid '%d' has an entry" % ppid
+ else:
+# print "Pid '%d' missing from pid map" % pid
+ return 0
+ return ppid
+
+ # merge in the cmdline data
+ if self.cmdline is not None:
+ for proc in self.ps_stats.process_map.values():
+ rpid = int (proc.pid // 1000)
+ if rpid in self.cmdline:
+ cmd = self.cmdline[rpid]
+ proc.exe = cmd['exe']
+ proc.args = cmd['args']
+# else:
+# print "proc %d '%s' not in cmdline" % (rpid, proc.exe)
+
+ # re-parent any stray orphans if we can
+ if self.parent_map is not None:
+ for process in self.ps_stats.process_map.values():
+ ppid = find_parent_id_for (int(process.pid // 1000))
+ if ppid:
+ process.ppid = ppid * 1000
+
+ # stitch the tree together with pointers
+ for process in self.ps_stats.process_map.values():
+ process.set_parent (self.ps_stats.process_map)
+
+ # count on fingers variously
+ for process in self.ps_stats.process_map.values():
+ process.calc_stats (self.ps_stats.sample_period)
+
+ def crop(self, writer, crop_after):
+
+ def is_idle_at(util, start, j):
+ k = j + 1
+ while k < len(util) and util[k][0] < start + 300:
+ k += 1
+ k = min(k, len(util)-1)
+
+ if util[j][1] >= 0.25:
+ return False
+
+ avgload = sum(u[1] for u in util[j:k+1]) / (k-j+1)
+ if avgload < 0.25:
+ return True
+ else:
+ return False
+ def is_idle(util, start):
+ for j in range(0, len(util)):
+ if util[j][0] < start:
+ continue
+ return is_idle_at(util, start, j)
+ else:
+ return False
+
+        names = [x[:15] for x in crop_after.split(",")]
+        proc = None
+        for p in self.ps_stats.process_map.values():
+            if p.cmd in names or p.exe in names:
+                proc = p
+                writer.info("selected proc '%s' from list (start %d)"
+                            % (proc.cmd, proc.start_time))
+                break
+        if proc is None:
+            writer.warn("no selected crop proc '%s' in list" % crop_after)
+
+
+ cpu_util = [(sample.time, sample.user + sample.sys + sample.io) for sample in self.cpu_stats]
+ disk_util = [(sample.time, sample.util) for sample in self.disk_stats]
+
+ idle = None
+ for i in range(0, len(cpu_util)):
+ if cpu_util[i][0] < proc.start_time:
+ continue
+ if is_idle_at(cpu_util, cpu_util[i][0], i) \
+ and is_idle(disk_util, cpu_util[i][0]):
+ idle = cpu_util[i][0]
+ break
+
+ if idle is None:
+ writer.warn ("not idle after proc '%s'" % crop_after)
+ return None
+
+ crop_at = idle + 300
+ writer.info ("cropping at time %d" % crop_at)
+ while len (self.cpu_stats) \
+ and self.cpu_stats[-1].time > crop_at:
+ self.cpu_stats.pop()
+ while len (self.disk_stats) \
+ and self.disk_stats[-1].time > crop_at:
+ self.disk_stats.pop()
+
+ self.ps_stats.end_time = crop_at
+
+ cropped_map = {}
+ for key, value in self.ps_stats.process_map.items():
+ if (value.start_time <= crop_at):
+ cropped_map[key] = value
+
+ for proc in cropped_map.values():
+ proc.duration = min (proc.duration, crop_at - proc.start_time)
+ while len (proc.samples) \
+ and proc.samples[-1].time > crop_at:
+ proc.samples.pop()
+
+ self.ps_stats.process_map = cropped_map
+
+ return idle
+
+
+
+class ParseError(Exception):
+ """Represents errors during parse of the bootchart."""
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return self.value
+
+def _parse_headers(file):
+ """Parses the headers of the bootchart."""
+ def parse(acc, line):
+ (headers, last) = acc
+ if '=' in line:
+ last, value = map (lambda x: x.strip(), line.split('=', 1))
+ else:
+ value = line.strip()
+ headers[last] += value
+ return headers, last
+ return reduce(parse, file.read().decode('utf-8').split('\n'), (defaultdict(str),''))[0]
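+
+# A hypothetical header block as accepted by _parse_headers; a line without
+# '=' continues the value of the previous key:
+#
+#   version = 0.9
+#   title = Boot chart for build host
+#   system.cpu = Intel(R) Xeon(R) CPU (4)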
+
+def _parse_timed_blocks(file):
+ """Parses (ie., splits) a file into so-called timed-blocks. A
+ timed-block consists of a timestamp on a line by itself followed
+ by zero or more lines of data for that point in time."""
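+    # Hypothetical input: two timed blocks separated by a blank line:
+    #
+    #   100
+    #   <data lines for t=100>
+    #
+    #   200
+    #   <data lines for t=200>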
+ def parse(block):
+ lines = block.split('\n')
+ if not lines:
+            raise ParseError('expected a timed-block consisting of a timestamp followed by data lines')
+ try:
+ return (int(lines[0]), lines[1:])
+ except ValueError:
+ raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0])
+ blocks = file.read().decode('utf-8').split('\n\n')
+ return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')]
+
+def _parse_proc_ps_log(writer, file):
+ """
+ * See proc(5) for details.
+ *
+ * {pid, comm, state, ppid, pgrp, session, tty_nr, tpgid, flags, minflt, cminflt, majflt, cmajflt, utime, stime,
+ * cutime, cstime, priority, nice, 0, itrealvalue, starttime, vsize, rss, rlim, startcode, endcode, startstack,
+ * kstkesp, kstkeip}
+ """
+ processMap = {}
+ ltime = 0
+ timed_blocks = _parse_timed_blocks(file)
+ for time, lines in timed_blocks:
+ for line in lines:
+ if not line: continue
+ tokens = line.split(' ')
+ if len(tokens) < 21:
+ continue
+
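+            # comm may contain spaces; locate the token that closes the
+            # '(comm)' field so later fields can be indexed relative to it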
+ offset = [index for index, token in enumerate(tokens[1:]) if token[-1] == ')'][0]
+ pid, cmd, state, ppid = int(tokens[0]), ' '.join(tokens[1:2+offset]), tokens[2+offset], int(tokens[3+offset])
+ userCpu, sysCpu, stime = int(tokens[13+offset]), int(tokens[14+offset]), int(tokens[21+offset])
+
+            # 'fixed point' pids: scale by 1000 to leave room for synthetic sub-pids
+ pid *= 1000
+ ppid *= 1000
+ if pid in processMap:
+ process = processMap[pid]
+                process.cmd = cmd.strip('()')    # track the most recently seen name for this pid
+ else:
+ process = Process(writer, pid, cmd.strip('()'), ppid, min(time, stime))
+ processMap[pid] = process
+
+ if process.last_user_cpu_time is not None and process.last_sys_cpu_time is not None and ltime is not None:
+ userCpuLoad, sysCpuLoad = process.calc_load(userCpu, sysCpu, max(1, time - ltime))
+ cpuSample = CPUSample('null', userCpuLoad, sysCpuLoad, 0.0)
+ process.samples.append(ProcessSample(time, state, cpuSample))
+
+ process.last_user_cpu_time = userCpu
+ process.last_sys_cpu_time = sysCpu
+ ltime = time
+
+ if len (timed_blocks) < 2:
+ return None
+
+ startTime = timed_blocks[0][0]
+ avgSampleLength = (ltime - startTime)/(len (timed_blocks) - 1)
+
+ return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime)
+
+def _parse_taskstats_log(writer, file):
+ """
+ * See bootchart-collector.c for details.
+ *
+ * { pid, ppid, comm, cpu_run_real_total, blkio_delay_total, swapin_delay_total }
+ *
+ """
+ processMap = {}
+ pidRewrites = {}
+ ltime = None
+ timed_blocks = _parse_timed_blocks(file)
+ for time, lines in timed_blocks:
+ # we have no 'stime' from taskstats, so prep 'init'
+ if ltime is None:
+ process = Process(writer, 1, '[init]', 0, 0)
+ processMap[1000] = process
+ ltime = time
+# continue
+ for line in lines:
+ if not line: continue
+ tokens = line.split(' ')
+ if len(tokens) != 6:
+ continue
+
+ opid, ppid, cmd = int(tokens[0]), int(tokens[1]), tokens[2]
+ cpu_ns, blkio_delay_ns, swapin_delay_ns = long(tokens[-3]), long(tokens[-2]), long(tokens[-1]),
+
+ # make space for trees of pids
+ opid *= 1000
+ ppid *= 1000
+
+ # when the process name changes, we re-write the pid.
+ if opid in pidRewrites:
+ pid = pidRewrites[opid]
+ else:
+ pid = opid
+
+ cmd = cmd.strip('(').strip(')')
+ if pid in processMap:
+ process = processMap[pid]
+ if process.cmd != cmd:
+ pid += 1
+ pidRewrites[opid] = pid
+# print "process mutation ! '%s' vs '%s' pid %s -> pid %s\n" % (process.cmd, cmd, opid, pid)
+ process = process.split (writer, pid, cmd, ppid, time)
+ processMap[pid] = process
+ else:
+                    process.cmd = cmd
+ else:
+ process = Process(writer, pid, cmd, ppid, time)
+ processMap[pid] = process
+
+                delta_cpu_ns = float(cpu_ns - process.last_cpu_ns)
+                delta_blkio_delay_ns = float(blkio_delay_ns - process.last_blkio_delay_ns)
+                delta_swapin_delay_ns = float(swapin_delay_ns - process.last_swapin_delay_ns)
+
+ # make up some state data ...
+ if delta_cpu_ns > 0:
+ state = "R"
+ elif delta_blkio_delay_ns + delta_swapin_delay_ns > 0:
+ state = "D"
+ else:
+ state = "S"
+
+            # retain the ns timing information in a CPUSample - which, in the
+            # old style, approximates the %age of CPU used in this time-slice.
+ if delta_cpu_ns + delta_blkio_delay_ns + delta_swapin_delay_ns > 0:
+# print "proc %s cpu_ns %g delta_cpu %g" % (cmd, cpu_ns, delta_cpu_ns)
+ cpuSample = CPUSample('null', delta_cpu_ns, 0.0,
+ delta_blkio_delay_ns,
+ delta_swapin_delay_ns)
+ process.samples.append(ProcessSample(time, state, cpuSample))
+
+ process.last_cpu_ns = cpu_ns
+ process.last_blkio_delay_ns = blkio_delay_ns
+ process.last_swapin_delay_ns = swapin_delay_ns
+ ltime = time
+
+ if len (timed_blocks) < 2:
+ return None
+
+ startTime = timed_blocks[0][0]
+ avgSampleLength = (ltime - startTime)/(len(timed_blocks)-1)
+
+ return ProcessStats (writer, processMap, len (timed_blocks), avgSampleLength, startTime, ltime)
+
+def _parse_proc_stat_log(file):
+ samples = []
+ ltimes = None
+ for time, lines in _parse_timed_blocks(file):
+        # skip empty lines
+ if not lines:
+ continue
+ # CPU times {user, nice, system, idle, io_wait, irq, softirq}
+ tokens = lines[0].split()
+ times = [ int(token) for token in tokens[1:] ]
+ if ltimes:
+ user = float((times[0] + times[1]) - (ltimes[0] + ltimes[1]))
+ system = float((times[2] + times[5] + times[6]) - (ltimes[2] + ltimes[5] + ltimes[6]))
+ idle = float(times[3] - ltimes[3])
+ iowait = float(times[4] - ltimes[4])
+
+ aSum = max(user + system + idle + iowait, 1)
+ samples.append( CPUSample(time, user/aSum, system/aSum, iowait/aSum) )
+
+ ltimes = times
+ # skip the rest of statistics lines
+ return samples
+
+def _parse_reduced_log(file, sample_class):
+ samples = []
+ for time, lines in _parse_timed_blocks(file):
+ samples.append(sample_class(time, *[float(x) for x in lines[0].split()]))
+ return samples
+
+def _parse_proc_disk_stat_log(file):
+ """
+ Parse file for disk stats, but only look at the whole device, eg. sda,
+ not sda1, sda2 etc. The format of relevant lines should be:
+ {major minor name rio rmerge rsect ruse wio wmerge wsect wuse running use aveq}
+ """
+    disk_regex_re = re.compile (r'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
+
+ # this gets called an awful lot.
+ def is_relevant_line(linetokens):
+ if len(linetokens) != 14:
+ return False
+ disk = linetokens[2]
+ return disk_regex_re.match(disk)
+
+ disk_stat_samples = []
+
+ for time, lines in _parse_timed_blocks(file):
+ sample = DiskStatSample(time)
+ relevant_tokens = [linetokens for linetokens in map (lambda x: x.split(),lines) if is_relevant_line(linetokens)]
+
+ for tokens in relevant_tokens:
+ disk, rsect, wsect, use = tokens[2], int(tokens[5]), int(tokens[9]), int(tokens[12])
+ sample.add_diskdata([rsect, wsect, use])
+
+ disk_stat_samples.append(sample)
+
+ disk_stats = []
+ for sample1, sample2 in zip(disk_stat_samples[:-1], disk_stat_samples[1:]):
+ interval = sample1.time - sample2.time
+ if interval == 0:
+ interval = 1
+ sums = [ a - b for a, b in zip(sample1.diskdata, sample2.diskdata) ]
+ readTput = sums[0] / 2.0 * 100.0 / interval
+ writeTput = sums[1] / 2.0 * 100.0 / interval
+ util = float( sums[2] ) / 10 / interval
+ util = max(0.0, min(1.0, util))
+ disk_stats.append(DiskSample(sample2.time, readTput, writeTput, util))
+
+ return disk_stats
+
+def _parse_reduced_proc_meminfo_log(file):
+ """
+ Parse file for global memory statistics with
+ 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree' values
+ (in that order) directly stored on one line.
+ """
+ used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
+
+ mem_stats = []
+ for time, lines in _parse_timed_blocks(file):
+ sample = MemSample(time)
+ for name, value in zip(used_values, lines[0].split()):
+ sample.add_value(name, int(value))
+
+ if sample.valid():
+ mem_stats.append(DrawMemSample(sample))
+
+ return mem_stats
+
+def _parse_proc_meminfo_log(file):
+ """
+ Parse file for global memory statistics.
+ The format of relevant lines should be: ^key: value( unit)?
+ """
+ used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
+
+ mem_stats = []
+ meminfo_re = re.compile(r'([^ \t:]+):\s*(\d+).*')
+
+ for time, lines in _parse_timed_blocks(file):
+ sample = MemSample(time)
+
+ for line in lines:
+ match = meminfo_re.match(line)
+ if not match:
+ raise ParseError("Invalid meminfo line \"%s\"" % line)
+ sample.add_value(match.group(1), int(match.group(2)))
+
+ if sample.valid():
+ mem_stats.append(DrawMemSample(sample))
+
+ return mem_stats
+
+def _parse_monitor_disk_log(file):
+ """
+ Parse file with information about amount of diskspace used.
+ The format of relevant lines should be: ^volume path: number-of-bytes?
+ """
+ disk_stats = []
+ diskinfo_re = re.compile(r'^(.+):\s*(\d+)$')
+
+ for time, lines in _parse_timed_blocks(file):
+ sample = DiskSpaceSample(time)
+
+ for line in lines:
+ match = diskinfo_re.match(line)
+ if not match:
+ raise ParseError("Invalid monitor_disk line \"%s\"" % line)
+ sample.add_value(match.group(1), int(match.group(2)))
+
+ if sample.valid():
+ disk_stats.append(sample)
+
+ return disk_stats
+
+
+# if we boot the kernel with: initcall_debug printk.time=1 we can
+# get all manner of interesting data from the dmesg output
+# We turn this into a pseudo-process tree: each initcall event is
+# characterised by a timestamp, a function name and, once the matching
+# 'initcall ... returned' line is seen, a duration.
+# we don't try to detect a "kernel finished" state - since the kernel
+# continues to do interesting things after init is called.
+#
+# sample input:
+# [ 0.000000] ACPI: FACP 3f4fc000 000F4 (v04 INTEL Napa 00000001 MSFT 01000013)
+# ...
+# [ 0.039993] calling migration_init+0x0/0x6b @ 1
+# [ 0.039993] initcall migration_init+0x0/0x6b returned 1 after 0 usecs
+def _parse_dmesg(writer, file):
+    timestamp_re = re.compile (r"^\[\s*(\d+\.\d+)\s*]\s+(.*)$")
+    split_re = re.compile (r"^(\S+)\s+([\S\+_-]+) (.*)$")
+ processMap = {}
+ idx = 0
+ inc = 1.0 / 1000000
+ kernel = Process(writer, idx, "k-boot", 0, 0.1)
+ processMap['k-boot'] = kernel
+ base_ts = False
+ max_ts = 0
+ for line in file.read().decode('utf-8').split('\n'):
+ t = timestamp_re.match (line)
+ if t is None:
+# print "duff timestamp " + line
+ continue
+
+ time_ms = float (t.group(1)) * 1000
+ # looks like we may have a huge diff after the clock
+ # has been set up. This could lead to huge graph:
+ # so huge we will be killed by the OOM.
+ # So instead of using the plain timestamp we will
+ # use a delta to first one and skip the first one
+ # for convenience
+ if max_ts == 0 and not base_ts and time_ms > 1000:
+ base_ts = time_ms
+ continue
+ max_ts = max(time_ms, max_ts)
+ if base_ts:
+# print "fscked clock: used %f instead of %f" % (time_ms - base_ts, time_ms)
+ time_ms -= base_ts
+ m = split_re.match (t.group(2))
+
+ if m is None:
+ continue
+# print "match: '%s'" % (m.group(1))
+ type = m.group(1)
+ func = m.group(2)
+ rest = m.group(3)
+
+ if t.group(2).startswith ('Write protecting the') or \
+ t.group(2).startswith ('Freeing unused kernel memory'):
+ kernel.duration = time_ms / 10
+ continue
+
+# print "foo: '%s' '%s' '%s'" % (type, func, rest)
+ if type == "calling":
+ ppid = kernel.pid
+ p = re.match ("\@ (\d+)", rest)
+ if p is not None:
+ ppid = float (p.group(1)) // 1000
+# print "match: '%s' ('%g') at '%s'" % (func, ppid, time_ms)
+ name = func.split ('+', 1) [0]
+ idx += inc
+ processMap[func] = Process(writer, ppid + idx, name, ppid, time_ms / 10)
+ elif type == "initcall":
+# print "finished: '%s' at '%s'" % (func, time_ms)
+ if func in processMap:
+ process = processMap[func]
+ process.duration = (time_ms / 10) - process.start_time
+ else:
+ print("corrupted init call for %s" % (func))
+
+ elif type == "async_waiting" or type == "async_continuing":
+ continue # ignore
+
+ return processMap.values()
+
+#
+# Parse binary pacct accounting file output if we have one
+# cf. /usr/include/linux/acct.h
+#
+def _parse_pacct(writer, file):
+ # read LE int32
+ def _read_le_int32(file):
+ byts = file.read(4)
+        return (byts[0]) | (byts[1] << 8) | \
+            (byts[2] << 16) | (byts[3] << 24)    # bytes index as ints on py3
+
+ parent_map = {}
+ parent_map[0] = 0
+    while file.read(1) != b"": # ignore flags; b"" signals EOF
+ ver = file.read(1)
+ if ord(ver) < 3:
+ print("Invalid version 0x%x" % (ord(ver)))
+ return None
+
+ file.seek (14, 1) # user, group etc.
+ pid = _read_le_int32 (file)
+ ppid = _read_le_int32 (file)
+# print "Parent of %d is %d" % (pid, ppid)
+ parent_map[pid] = ppid
+ file.seek (4 + 4 + 16, 1) # timings
+ file.seek (16, 1) # acct_comm
+ return parent_map
+
+def _parse_paternity_log(writer, file):
+ parent_map = {}
+ parent_map[0] = 0
+ for line in file.read().decode('utf-8').split('\n'):
+ if not line:
+ continue
+ elems = line.split(' ') # <Child> <Parent>
+ if len (elems) >= 2:
+# print "paternity of %d is %d" % (int(elems[0]), int(elems[1]))
+ parent_map[int(elems[0])] = int(elems[1])
+ else:
+ print("Odd paternity line '%s'" % (line))
+ return parent_map
+
+def _parse_cmdline_log(writer, file):
+ cmdLines = {}
+ for block in file.read().decode('utf-8').split('\n\n'):
+ lines = block.split('\n')
+ if len (lines) >= 3:
+# print "Lines '%s'" % (lines[0])
+ pid = int (lines[0])
+ values = {}
+ values['exe'] = lines[1].lstrip(':')
+ args = lines[2].lstrip(':').split('\0')
+ args.pop()
+ values['args'] = args
+ cmdLines[pid] = values
+ return cmdLines
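+
+# A hypothetical cmdline2.log block as consumed above: a pid line, then the
+# exe and the NUL-separated args, each prefixed with ':':
+#
+#   1234
+#   :/usr/bin/python3
+#   :/usr/bin/python3\0bitbake\0core-image-minimal\0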
+
+def _parse_bitbake_buildstats(writer, state, filename, file):
+ paths = filename.split("/")
+ task = paths[-1]
+ pn = paths[-2]
+ start = None
+ end = None
+ for line in file:
+ if line.startswith("Started:"):
+ start = int(float(line.split()[-1]))
+ elif line.startswith("Ended:"):
+ end = int(float(line.split()[-1]))
+ if start and end:
+ state.add_process(pn + ":" + task, start, end)
+
+def get_num_cpus(headers):
+ """Get the number of CPUs from the system.cpu header property. As the
+ CPU utilization graphs are relative, the number of CPUs currently makes
+ no difference."""
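+    # e.g. a hypothetical headers["system.cpu"] of "Intel(R) Xeon(R) CPU (4)"
+    # yields 4 via the regex match below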
+ if headers is None:
+ return 1
+ if headers.get("system.cpu.num"):
+ return max (int (headers.get("system.cpu.num")), 1)
+ cpu_model = headers.get("system.cpu")
+ if cpu_model is None:
+ return 1
+ mat = re.match(".*\\((\\d+)\\)", cpu_model)
+ if mat is None:
+ return 1
+ return max (int(mat.group(1)), 1)
+
+def _do_parse(writer, state, filename, file):
+ writer.info("parsing '%s'" % filename)
+ t1 = clock()
+ name = os.path.basename(filename)
+ if name == "proc_diskstats.log":
+ state.disk_stats = _parse_proc_disk_stat_log(file)
+ elif name == "reduced_proc_diskstats.log":
+ state.disk_stats = _parse_reduced_log(file, DiskSample)
+ elif name == "proc_stat.log":
+ state.cpu_stats = _parse_proc_stat_log(file)
+ elif name == "reduced_proc_stat.log":
+ state.cpu_stats = _parse_reduced_log(file, CPUSample)
+ elif name == "proc_meminfo.log":
+ state.mem_stats = _parse_proc_meminfo_log(file)
+ elif name == "reduced_proc_meminfo.log":
+ state.mem_stats = _parse_reduced_proc_meminfo_log(file)
+ elif name == "cmdline2.log":
+ state.cmdline = _parse_cmdline_log(writer, file)
+ elif name == "monitor_disk.log":
+ state.monitor_disk = _parse_monitor_disk_log(file)
+ elif not filename.endswith('.log'):
+ _parse_bitbake_buildstats(writer, state, filename, file)
+ t2 = clock()
+ writer.info(" %s seconds" % str(t2-t1))
+ return state
+
+def parse_file(writer, state, filename):
+ if state.filename is None:
+ state.filename = filename
+ basename = os.path.basename(filename)
+ with open(filename, "rb") as file:
+ return _do_parse(writer, state, filename, file)
+
+def parse_paths(writer, state, paths):
+ for path in paths:
+ if state.filename is None:
+ state.filename = path
+ root, extension = os.path.splitext(path)
+ if not(os.path.exists(path)):
+ writer.warn("warning: path '%s' does not exist, ignoring." % path)
+ continue
+ #state.filename = path
+ if os.path.isdir(path):
+ files = sorted([os.path.join(path, f) for f in os.listdir(path)])
+ state = parse_paths(writer, state, files)
+ elif extension in [".tar", ".tgz", ".gz"]:
+ if extension == ".gz":
+ root, extension = os.path.splitext(root)
+ if extension != ".tar":
+ writer.warn("warning: can only handle zipped tar files, not zipped '%s'-files; ignoring" % extension)
+ continue
+ tf = None
+ try:
+ writer.status("parsing '%s'" % path)
+ tf = tarfile.open(path, 'r:*')
+ for name in tf.getnames():
+ state = _do_parse(writer, state, name, tf.extractfile(name))
+ except tarfile.ReadError as error:
+ raise ParseError("error: could not read tarfile '%s': %s." % (path, error))
+ finally:
+ if tf != None:
+ tf.close()
+ else:
+ state = parse_file(writer, state, path)
+ return state
+
+def split_res(res, options):
+ """ Split the res into n pieces """
+ res_list = []
+ if options.num > 1:
+ s_list = sorted(res.start.keys())
+ frag_size = len(s_list) / float(options.num)
+ # Need the top value
+ if frag_size > int(frag_size):
+ frag_size = int(frag_size + 1)
+ else:
+ frag_size = int(frag_size)
+
+ start = 0
+ end = frag_size
+ while start < end:
+ state = Trace(None, [], None)
+ if options.full_time:
+ state.min = min(res.start.keys())
+ state.max = max(res.end.keys())
+ for i in range(start, end):
+ # Add this line for reference
+ #state.add_process(pn + ":" + task, start, end)
+ for p in res.start[s_list[i]]:
+ state.add_process(p, s_list[i], res.processes[p][1])
+ start = end
+ end = end + frag_size
+ if end > len(s_list):
+ end = len(s_list)
+ res_list.append(state)
+ else:
+ res_list.append(res)
+ return res_list
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/process_tree.py b/poky/scripts/pybootchartgui/pybootchartgui/process_tree.py
new file mode 100644
index 000000000..cf88110b1
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/process_tree.py
@@ -0,0 +1,292 @@
+# This file is part of pybootchartgui.
+
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+class ProcessTree:
+ """ProcessTree encapsulates a process tree. The tree is built from log files
+ retrieved during the boot process. When building the process tree, it is
+ pruned and merged in order to be able to visualize it in a comprehensible
+ manner.
+
+ The following pruning techniques are used:
+
+ * idle processes that keep running during the last process sample
+      (which is a heuristic for background processes) are removed,
+ * short-lived processes (i.e. processes that only live for the
+ duration of two samples or less) are removed,
+ * the processes used by the boot logger are removed,
+ * exploders (i.e. processes that are known to spawn huge meaningless
+ process subtrees) have their subtrees merged together,
+ * siblings (i.e. processes with the same command line living
+ concurrently -- thread heuristic) are merged together,
+ * process runs (unary trees with processes sharing the command line)
+ are merged together.
+
+ """
+ LOGGER_PROC = 'bootchart-colle'
+ EXPLODER_PROCESSES = set(['hwup'])
+
+ def __init__(self, writer, kernel, psstats, sample_period,
+ monitoredApp, prune, idle, taskstats,
+ accurate_parentage, for_testing = False):
+ self.writer = writer
+ self.process_tree = []
+ self.taskstats = taskstats
+ if psstats is None:
+ process_list = kernel
+ elif kernel is None:
+ process_list = psstats.process_map.values()
+ else:
+ process_list = list(kernel) + list(psstats.process_map.values())
+ self.process_list = sorted(process_list, key = lambda p: p.pid)
+ self.sample_period = sample_period
+
+ self.build()
+ if not accurate_parentage:
+ self.update_ppids_for_daemons(self.process_list)
+
+ self.start_time = self.get_start_time(self.process_tree)
+ self.end_time = self.get_end_time(self.process_tree)
+ self.duration = self.end_time - self.start_time
+ self.idle = idle
+
+ if for_testing:
+ return
+
+ removed = self.merge_logger(self.process_tree, self.LOGGER_PROC, monitoredApp, False)
+ writer.status("merged %i logger processes" % removed)
+
+ if prune:
+ p_processes = self.prune(self.process_tree, None)
+ p_exploders = self.merge_exploders(self.process_tree, self.EXPLODER_PROCESSES)
+ p_threads = self.merge_siblings(self.process_tree)
+ p_runs = self.merge_runs(self.process_tree)
+ writer.status("pruned %i process, %i exploders, %i threads, and %i runs" % (p_processes, p_exploders, p_threads, p_runs))
+
+ self.sort(self.process_tree)
+
+ self.start_time = self.get_start_time(self.process_tree)
+ self.end_time = self.get_end_time(self.process_tree)
+ self.duration = self.end_time - self.start_time
+
+ self.num_proc = self.num_nodes(self.process_tree)
+
+ def build(self):
+ """Build the process tree from the list of top samples."""
+ self.process_tree = []
+ for proc in self.process_list:
+ if not proc.parent:
+ self.process_tree.append(proc)
+ else:
+ proc.parent.child_list.append(proc)
+
+ def sort(self, process_subtree):
+ """Sort process tree."""
+ for p in process_subtree:
+ p.child_list.sort(key = lambda p: p.pid)
+ self.sort(p.child_list)
+
+ def num_nodes(self, process_list):
+ "Counts the number of nodes in the specified process tree."""
+ nodes = 0
+ for proc in process_list:
+ nodes = nodes + self.num_nodes(proc.child_list)
+ return nodes + len(process_list)
+
+ def get_start_time(self, process_subtree):
+ """Returns the start time of the process subtree. This is the start
+ time of the earliest process.
+
+ """
+ if not process_subtree:
+ return 100000000
+ return min( [min(proc.start_time, self.get_start_time(proc.child_list)) for proc in process_subtree] )
+
+ def get_end_time(self, process_subtree):
+ """Returns the end time of the process subtree. This is the end time
+ of the last collected sample.
+
+ """
+ if not process_subtree:
+ return -100000000
+ return max( [max(proc.start_time + proc.duration, self.get_end_time(proc.child_list)) for proc in process_subtree] )
+
+ def get_max_pid(self, process_subtree):
+ """Returns the max PID found in the process tree."""
+ if not process_subtree:
+ return -100000000
+ return max( [max(proc.pid, self.get_max_pid(proc.child_list)) for proc in process_subtree] )
+
+ def update_ppids_for_daemons(self, process_list):
+ """Fedora hack: when loading the system services from rc, runuser(1)
+ is used. This sets the PPID of all daemons to 1, skewing
+ the process tree. Try to detect this and set the PPID of
+ these processes the PID of rc.
+
+ """
+ rcstartpid = -1
+ rcendpid = -1
+ rcproc = None
+ for p in process_list:
+ if p.cmd == "rc" and p.ppid // 1000 == 1:
+ rcproc = p
+ rcstartpid = p.pid
+ rcendpid = self.get_max_pid(p.child_list)
+ if rcstartpid != -1 and rcendpid != -1:
+ for p in process_list:
+ if p.pid > rcstartpid and p.pid < rcendpid and p.ppid // 1000 == 1:
+ p.ppid = rcstartpid
+ p.parent = rcproc
+ for p in process_list:
+ p.child_list = []
+ self.build()
+
+ def prune(self, process_subtree, parent):
+ """Prunes the process tree by removing idle processes and processes
+ that only live for the duration of a single top sample. Sibling
+ processes with the same command line (i.e. threads) are merged
+ together. This filters out sleepy background processes, short-lived
+        processes and bootchart's own analysis tools.
+ """
+ def is_idle_background_process_without_children(p):
+ process_end = p.start_time + p.duration
+ return not p.active and \
+ process_end >= self.start_time + self.duration and \
+ p.start_time > self.start_time and \
+ p.duration > 0.9 * self.duration and \
+ self.num_nodes(p.child_list) == 0
+
+ num_removed = 0
+ idx = 0
+ while idx < len(process_subtree):
+ p = process_subtree[idx]
+ if parent != None or len(p.child_list) == 0:
+
+ prune = False
+ if is_idle_background_process_without_children(p):
+ prune = True
+ elif p.duration <= 2 * self.sample_period:
+ # short-lived process
+ prune = True
+
+ if prune:
+ process_subtree.pop(idx)
+ for c in p.child_list:
+ process_subtree.insert(idx, c)
+ num_removed += 1
+ continue
+ else:
+ num_removed += self.prune(p.child_list, p)
+ else:
+ num_removed += self.prune(p.child_list, p)
+ idx += 1
+
+ return num_removed
+
+ def merge_logger(self, process_subtree, logger_proc, monitored_app, app_tree):
+ """Merges the logger's process subtree. The logger will typically
+ spawn lots of sleep and cat processes, thus polluting the
+ process tree.
+
+ """
+ num_removed = 0
+ for p in process_subtree:
+ is_app_tree = app_tree
+ if logger_proc == p.cmd and not app_tree:
+ is_app_tree = True
+ num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
+ # don't remove the logger itself
+ continue
+
+ if app_tree and monitored_app != None and monitored_app == p.cmd:
+ is_app_tree = False
+
+ if is_app_tree:
+ for child in p.child_list:
+ self.merge_processes(p, child)
+ num_removed += 1
+ p.child_list = []
+ else:
+ num_removed += self.merge_logger(p.child_list, logger_proc, monitored_app, is_app_tree)
+ return num_removed
+
+ def merge_exploders(self, process_subtree, processes):
+ """Merges specific process subtrees (used for processes which usually
+ spawn huge meaningless process trees).
+
+ """
+ num_removed = 0
+ for p in process_subtree:
+            if p.cmd in processes and len(p.child_list) > 0:
+ subtreemap = self.getProcessMap(p.child_list)
+ for child in subtreemap.values():
+ self.merge_processes(p, child)
+ num_removed += len(subtreemap)
+ p.child_list = []
+ p.cmd += " (+)"
+ else:
+ num_removed += self.merge_exploders(p.child_list, processes)
+ return num_removed
+
+ def merge_siblings(self, process_subtree):
+ """Merges thread processes. Sibling processes with the same command
+ line are merged together.
+
+ """
+ num_removed = 0
+ idx = 0
+ while idx < len(process_subtree)-1:
+ p = process_subtree[idx]
+ nextp = process_subtree[idx+1]
+ if nextp.cmd == p.cmd:
+ process_subtree.pop(idx+1)
+ idx -= 1
+ num_removed += 1
+ p.child_list.extend(nextp.child_list)
+ self.merge_processes(p, nextp)
+ num_removed += self.merge_siblings(p.child_list)
+ idx += 1
+ if len(process_subtree) > 0:
+ p = process_subtree[-1]
+ num_removed += self.merge_siblings(p.child_list)
+ return num_removed
+
+ def merge_runs(self, process_subtree):
+ """Merges process runs. Single child processes which share the same
+ command line with the parent are merged.
+
+ """
+ num_removed = 0
+ idx = 0
+ while idx < len(process_subtree):
+ p = process_subtree[idx]
+ if len(p.child_list) == 1 and p.child_list[0].cmd == p.cmd:
+ child = p.child_list[0]
+ p.child_list = list(child.child_list)
+ self.merge_processes(p, child)
+ num_removed += 1
+ continue
+ num_removed += self.merge_runs(p.child_list)
+ idx += 1
+ return num_removed
+
+ def merge_processes(self, p1, p2):
+ """Merges two process' samples."""
+ p1.samples.extend(p2.samples)
+ p1.samples.sort( key = lambda p: p.time )
+ p1time = p1.start_time
+ p2time = p2.start_time
+ p1.start_time = min(p1time, p2time)
+ pendtime = max(p1time + p1.duration, p2time + p2.duration)
+ p1.duration = pendtime - p1.start_time
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/samples.py b/poky/scripts/pybootchartgui/pybootchartgui/samples.py
new file mode 100644
index 000000000..9fc309b3a
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/samples.py
@@ -0,0 +1,178 @@
+# This file is part of pybootchartgui.
+
+# pybootchartgui is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# pybootchartgui is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
+
+
+class DiskStatSample:
+ def __init__(self, time):
+ self.time = time
+ self.diskdata = [0, 0, 0]
+ def add_diskdata(self, new_diskdata):
+ self.diskdata = [ a + b for a, b in zip(self.diskdata, new_diskdata) ]
+
+class CPUSample:
+ def __init__(self, time, user, sys, io = 0.0, swap = 0.0):
+ self.time = time
+ self.user = user
+ self.sys = sys
+ self.io = io
+ self.swap = swap
+
+ @property
+ def cpu(self):
+ return self.user + self.sys
+
+ def __str__(self):
+ return str(self.time) + "\t" + str(self.user) + "\t" + \
+ str(self.sys) + "\t" + str(self.io) + "\t" + str (self.swap)
+
+class MemSample:
+ used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
+
+ def __init__(self, time):
+ self.time = time
+ self.records = {}
+
+ def add_value(self, name, value):
+ if name in MemSample.used_values:
+ self.records[name] = value
+
+ def valid(self):
+ # discard incomplete samples
+ return all(v in self.records for v in MemSample.used_values)
+
+class DrawMemSample:
+ """
+ Condensed version of a MemSample with exactly the values used by the drawing code.
+ Initialized either from a valid MemSample or
+ a tuple/list of buffer/used/cached/swap values.
+ """
+ def __init__(self, mem_sample):
+ self.time = mem_sample.time
+ if isinstance(mem_sample, MemSample):
+ self.buffers = mem_sample.records['MemTotal'] - mem_sample.records['MemFree']
+ self.used = mem_sample.records['MemTotal'] - mem_sample.records['MemFree'] - mem_sample.records['Buffers']
+ self.cached = mem_sample.records['Cached']
+ self.swap = mem_sample.records['SwapTotal'] - mem_sample.records['SwapFree']
+ else:
+ self.buffers, self.used, self.cached, self.swap = mem_sample
+
+class DiskSpaceSample:
+ def __init__(self, time):
+ self.time = time
+ self.records = {}
+
+ def add_value(self, name, value):
+ self.records[name] = value
+
+ def valid(self):
+ return bool(self.records)
+
+class ProcessSample:
+ def __init__(self, time, state, cpu_sample):
+ self.time = time
+ self.state = state
+ self.cpu_sample = cpu_sample
+
+ def __str__(self):
+ return str(self.time) + "\t" + str(self.state) + "\t" + str(self.cpu_sample)
+
+class ProcessStats:
+ def __init__(self, writer, process_map, sample_count, sample_period, start_time, end_time):
+ self.process_map = process_map
+ self.sample_count = sample_count
+ self.sample_period = sample_period
+ self.start_time = start_time
+ self.end_time = end_time
+ writer.info ("%d samples, avg. sample length %f" % (self.sample_count, self.sample_period))
+ writer.info ("process list size: %d" % len (self.process_map.values()))
+
+class Process:
+ def __init__(self, writer, pid, cmd, ppid, start_time):
+ self.writer = writer
+ self.pid = pid
+ self.cmd = cmd
+ self.exe = cmd
+ self.args = []
+ self.ppid = ppid
+ self.start_time = start_time
+ self.duration = 0
+ self.samples = []
+ self.parent = None
+ self.child_list = []
+
+ self.active = None
+ self.last_user_cpu_time = None
+ self.last_sys_cpu_time = None
+
+ self.last_cpu_ns = 0
+ self.last_blkio_delay_ns = 0
+ self.last_swapin_delay_ns = 0
+
+ # split this process' run - triggered by a name change
+ def split(self, writer, pid, cmd, ppid, start_time):
+ split = Process (writer, pid, cmd, ppid, start_time)
+
+ split.last_cpu_ns = self.last_cpu_ns
+ split.last_blkio_delay_ns = self.last_blkio_delay_ns
+ split.last_swapin_delay_ns = self.last_swapin_delay_ns
+
+ return split
+
+ def __str__(self):
+ return " ".join([str(self.pid), self.cmd, str(self.ppid), '[ ' + str(len(self.samples)) + ' samples ]' ])
+
+ def calc_stats(self, samplePeriod):
+ if self.samples:
+ firstSample = self.samples[0]
+ lastSample = self.samples[-1]
+ self.start_time = min(firstSample.time, self.start_time)
+ self.duration = lastSample.time - self.start_time + samplePeriod
+
+ activeCount = sum( [1 for sample in self.samples if sample.cpu_sample and sample.cpu_sample.sys + sample.cpu_sample.user + sample.cpu_sample.io > 0.0] )
+ activeCount = activeCount + sum( [1 for sample in self.samples if sample.state == 'D'] )
+ self.active = (activeCount>2)
+
+ def calc_load(self, userCpu, sysCpu, interval):
+ userCpuLoad = float(userCpu - self.last_user_cpu_time) / interval
+ sysCpuLoad = float(sysCpu - self.last_sys_cpu_time) / interval
+ cpuLoad = userCpuLoad + sysCpuLoad
+ # normalize
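+ # e.g. illustrative deltas of 0.8 user and 0.6 sys over a 1.0s
+ # interval give cpuLoad 1.4, normalized to roughly 0.57 and 0.43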
+ if cpuLoad > 1.0:
+ userCpuLoad = userCpuLoad / cpuLoad
+ sysCpuLoad = sysCpuLoad / cpuLoad
+ return (userCpuLoad, sysCpuLoad)
+
+ def set_parent(self, processMap):
+ if self.ppid != None:
+ self.parent = processMap.get (self.ppid)
+ if self.parent == None and self.pid // 1000 > 1 and \
+ not (self.ppid == 2000 or self.pid == 2000): # kernel threads: ppid=2
+ self.writer.warn("Missing CONFIG_PROC_EVENTS: no parent for pid '%i' ('%s') with ppid '%i'" \
+ % (self.pid,self.cmd,self.ppid))
+
+ def get_end_time(self):
+ return self.start_time + self.duration
+
+class DiskSample:
+ def __init__(self, time, read, write, util):
+ self.time = time
+ self.read = read
+ self.write = write
+ self.util = util
+ self.tput = read + write
+
+ def __str__(self):
+ return "\t".join([str(self.time), str(self.read), str(self.write), str(self.util)])
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/tests/parser_test.py b/poky/scripts/pybootchartgui/pybootchartgui/tests/parser_test.py
new file mode 100644
index 000000000..00fb3bf79
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/tests/parser_test.py
@@ -0,0 +1,105 @@
+import sys, os, re, struct, operator, math
+from collections import defaultdict
+import unittest
+
+sys.path.insert(0, os.getcwd())
+
+import pybootchartgui.parsing as parsing
+import pybootchartgui.main as main
+
+debug = False
+
+def floatEq(f1, f2):
+ return math.fabs(f1-f2) < 0.00001
+
+bootchart_dir = os.path.join(os.path.dirname(sys.argv[0]), '../../examples/1/')
+parser = main._mk_options_parser()
+options, args = parser.parse_args(['--q', bootchart_dir])
+writer = main._mk_writer(options)
+
+class TestBCParser(unittest.TestCase):
+
+ def setUp(self):
+ self.name = "My first unittest"
+ self.rootdir = bootchart_dir
+
+ def mk_fname(self,f):
+ return os.path.join(self.rootdir, f)
+
+ def testParseHeader(self):
+ trace = parsing.Trace(writer, args, options)
+ state = parsing.parse_file(writer, trace, self.mk_fname('header'))
+ self.assertEqual(6, len(state.headers))
+ self.assertEqual(2, parsing.get_num_cpus(state.headers))
+
+ def test_parseTimedBlocks(self):
+ trace = parsing.Trace(writer, args, options)
+ state = parsing.parse_file(writer, trace, self.mk_fname('proc_diskstats.log'))
+ self.assertEqual(141, len(state.disk_stats))
+
+ def testParseProcPsLog(self):
+ trace = parsing.Trace(writer, args, options)
+ state = parsing.parse_file(writer, trace, self.mk_fname('proc_ps.log'))
+ samples = state.ps_stats
+ processes = samples.process_map
+ sorted_processes = [processes[k] for k in sorted(processes.keys())]
+
+ ps_data = open(self.mk_fname('extract2.proc_ps.log'))
+ for index, line in enumerate(ps_data):
+ tokens = line.split()
+ process = sorted_processes[index]
+ if debug:
+ print(tokens[0:4])
+ print(process.pid // 1000, process.cmd, process.ppid, len(process.samples))
+ print('-------------------')
+
+ self.assertEqual(tokens[0], str(process.pid // 1000))
+ self.assertEqual(tokens[1], str(process.cmd))
+ self.assertEqual(tokens[2], str(process.ppid // 1000))
+ self.assertEqual(tokens[3], str(len(process.samples)))
+ ps_data.close()
+
+ def testparseProcDiskStatLog(self):
+ trace = parsing.Trace(writer, args, options)
+ state_with_headers = parsing.parse_file(writer, trace, self.mk_fname('header'))
+ state_with_headers.headers['system.cpu'] = 'xxx (2)'
+ samples = parsing.parse_file(writer, state_with_headers, self.mk_fname('proc_diskstats.log')).disk_stats
+ self.assertEqual(141, len(samples))
+
+ diskstats_data = open(self.mk_fname('extract.proc_diskstats.log'))
+ for index, line in enumerate(diskstats_data):
+ tokens = line.split('\t')
+ sample = samples[index]
+ if debug:
+ print(line.rstrip())
+ print(sample)
+ print('-------------------')
+
+ self.assertEqual(tokens[0], str(sample.time))
+ self.assert_(floatEq(float(tokens[1]), sample.read))
+ self.assert_(floatEq(float(tokens[2]), sample.write))
+ self.assert_(floatEq(float(tokens[3]), sample.util))
+ diskstats_data.close()
+
+ def testparseProcStatLog(self):
+ trace = parsing.Trace(writer, args, options)
+ samples = parsing.parse_file(writer, trace, self.mk_fname('proc_stat.log')).cpu_stats
+ self.assertEqual(141, len(samples))
+
+ stat_data = open(self.mk_fname('extract.proc_stat.log'))
+ for index, line in enumerate(stat_data):
+ tokens = line.split('\t')
+ sample = samples[index]
+ if debug:
+ print(line.rstrip())
+ print(sample)
+ print('-------------------')
+ self.assert_(floatEq(float(tokens[0]), sample.time))
+ self.assert_(floatEq(float(tokens[1]), sample.user))
+ self.assert_(floatEq(float(tokens[2]), sample.sys))
+ self.assert_(floatEq(float(tokens[3]), sample.io))
+ stat_data.close()
+
+if __name__ == '__main__':
+ unittest.main()
+
diff --git a/poky/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py b/poky/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py
new file mode 100644
index 000000000..6f46a1c03
--- /dev/null
+++ b/poky/scripts/pybootchartgui/pybootchartgui/tests/process_tree_test.py
@@ -0,0 +1,92 @@
+import sys
+import os
+import unittest
+
+sys.path.insert(0, os.getcwd())
+
+import pybootchartgui.parsing as parsing
+import pybootchartgui.process_tree as process_tree
+import pybootchartgui.main as main
+
+if sys.version_info >= (3, 0):
+ long = int
+
+class TestProcessTree(unittest.TestCase):
+
+ def setUp(self):
+ self.name = "Process tree unittest"
+ self.rootdir = os.path.join(os.path.dirname(sys.argv[0]), '../../examples/1/')
+
+ parser = main._mk_options_parser()
+ options, args = parser.parse_args(['--q', self.rootdir])
+ writer = main._mk_writer(options)
+ trace = parsing.Trace(writer, args, options)
+
+ parsing.parse_file(writer, trace, self.mk_fname('proc_ps.log'))
+ trace.compile(writer)
+ self.processtree = process_tree.ProcessTree(writer, None, trace.ps_stats, \
+ trace.ps_stats.sample_period, None, options.prune, None, None, False, for_testing = True)
+
+ def mk_fname(self,f):
+ return os.path.join(self.rootdir, f)
+
+ def flatten(self, process_tree):
+ flattened = []
+ for p in process_tree:
+ flattened.append(p)
+ flattened.extend(self.flatten(p.child_list))
+ return flattened
+
+ def checkAgainstJavaExtract(self, filename, process_tree):
+ test_data = open(filename)
+ for expected, actual in zip(test_data, self.flatten(process_tree)):
+ tokens = expected.split('\t')
+ self.assertEqual(int(tokens[0]), actual.pid // 1000)
+ self.assertEqual(tokens[1], actual.cmd)
+ self.assertEqual(long(tokens[2]), 10 * actual.start_time)
+ self.assert_(long(tokens[3]) - 10 * actual.duration < 5, "duration")
+ self.assertEqual(int(tokens[4]), len(actual.child_list))
+ self.assertEqual(int(tokens[5]), len(actual.samples))
+ test_data.close()
+
+ def testBuild(self):
+ process_tree = self.processtree.process_tree
+ self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.1.log'), process_tree)
+
+ def testMergeLogger(self):
+ self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
+ process_tree = self.processtree.process_tree
+ self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.2.log'), process_tree)
+
+ def testPrune(self):
+ self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
+ self.processtree.prune(self.processtree.process_tree, None)
+ process_tree = self.processtree.process_tree
+ self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3b.log'), process_tree)
+
+ def testMergeExploders(self):
+ self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
+ self.processtree.prune(self.processtree.process_tree, None)
+ self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
+ process_tree = self.processtree.process_tree
+ self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3c.log'), process_tree)
+
+ def testMergeSiblings(self):
+ self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
+ self.processtree.prune(self.processtree.process_tree, None)
+ self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
+ self.processtree.merge_siblings(self.processtree.process_tree)
+ process_tree = self.processtree.process_tree
+ self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3d.log'), process_tree)
+
+ def testMergeRuns(self):
+ self.processtree.merge_logger(self.processtree.process_tree, 'bootchartd', None, False)
+ self.processtree.prune(self.processtree.process_tree, None)
+ self.processtree.merge_exploders(self.processtree.process_tree, set(['hwup']))
+ self.processtree.merge_siblings(self.processtree.process_tree)
+ self.processtree.merge_runs(self.processtree.process_tree)
+ process_tree = self.processtree.process_tree
+ self.checkAgainstJavaExtract(self.mk_fname('extract.processtree.3e.log'), process_tree)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/poky/scripts/pythondeps b/poky/scripts/pythondeps
new file mode 100755
index 000000000..590b9769e
--- /dev/null
+++ b/poky/scripts/pythondeps
@@ -0,0 +1,250 @@
+#!/usr/bin/env python3
+#
+# Determine dependencies of python scripts or available python modules in a search path.
+#
+# Given the -d argument and a filename/filenames, returns the modules imported by those files.
+# Given the -d argument and a directory/directories, recurses to find all
+# python packages and modules, returns the modules imported by these.
+# Given the -p argument and a path or paths, scans that path for available python modules/packages.
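+#
+# Example invocations (the paths shown are illustrative only):
+#   pythondeps -d /path/to/script.py    # modules imported by the script
+#   pythondeps -p /usr/lib/python3/     # modules provided under that path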
+
+import argparse
+import ast
+import imp
+import logging
+import os.path
+import sys
+
+
+logger = logging.getLogger('pythondeps')
+
+suffixes = []
+for triple in imp.get_suffixes():
+ suffixes.append(triple[0])
+
+
+class PythonDepError(Exception):
+ pass
+
+
+class DependError(PythonDepError):
+ def __init__(self, path, error):
+ self.path = path
+ self.error = error
+ PythonDepError.__init__(self, error)
+
+ def __str__(self):
+ return "Failure determining dependencies of {}: {}".format(self.path, self.error)
+
+
+class ImportVisitor(ast.NodeVisitor):
+ def __init__(self):
+ self.imports = set()
+ self.importsfrom = []
+
+ def visit_Import(self, node):
+ for alias in node.names:
+ self.imports.add(alias.name)
+
+ def visit_ImportFrom(self, node):
+ self.importsfrom.append((node.module, [a.name for a in node.names], node.level))
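+ # e.g. "import os" adds 'os' to self.imports, while
+ # "from . import foo" appends (None, ['foo'], 1) to self.importsfrom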
+
+
+def walk_up(path):
+ while path:
+ yield path
+ path, _, _ = path.rpartition(os.sep)
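+# e.g. walk_up('/a/b/c') yields '/a/b/c', '/a/b' and then '/a'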
+
+
+def get_provides(path):
+ path = os.path.realpath(path)
+
+ def get_fn_name(fn):
+ for suffix in suffixes:
+ if fn.endswith(suffix):
+ return fn[:-len(suffix)]
+
+ isdir = os.path.isdir(path)
+ if isdir:
+ pkg_path = path
+ walk_path = path
+ else:
+ pkg_path = get_fn_name(path)
+ if pkg_path is None:
+ return
+ walk_path = os.path.dirname(path)
+
+ for curpath in walk_up(walk_path):
+ if not os.path.exists(os.path.join(curpath, '__init__.py')):
+ libdir = curpath
+ break
+ else:
+ libdir = ''
+
+ package_relpath = pkg_path[len(libdir)+1:]
+ package = '.'.join(package_relpath.split(os.sep))
+ if not isdir:
+ yield package, path
+ else:
+ if os.path.exists(os.path.join(path, '__init__.py')):
+ yield package, path
+
+ for dirpath, dirnames, filenames in os.walk(path):
+ relpath = dirpath[len(path)+1:]
+ if relpath:
+ if '__init__.py' not in filenames:
+ dirnames[:] = []
+ continue
+ else:
+ context = '.'.join(relpath.split(os.sep))
+ if package:
+ context = package + '.' + context
+ yield context, dirpath
+ else:
+ context = package
+
+ for fn in filenames:
+ adjusted_fn = get_fn_name(fn)
+ if not adjusted_fn or adjusted_fn == '__init__':
+ continue
+
+ fullfn = os.path.join(dirpath, fn)
+ if context:
+ yield context + '.' + adjusted_fn, fullfn
+ else:
+ yield adjusted_fn, fullfn
+
+
+def get_code_depends(code_string, path=None, provide=None, ispkg=False):
+ try:
+ code = ast.parse(code_string, path)
+ except TypeError as exc:
+ raise DependError(path, exc)
+ except SyntaxError as exc:
+ raise DependError(path, exc)
+
+ visitor = ImportVisitor()
+ visitor.visit(code)
+ for builtin_module in sys.builtin_module_names:
+ if builtin_module in visitor.imports:
+ visitor.imports.remove(builtin_module)
+
+ if provide:
+ provide_elements = provide.split('.')
+ if ispkg:
+ provide_elements.append("__self__")
+ context = '.'.join(provide_elements[:-1])
+ package_path = os.path.dirname(path)
+ else:
+ context = None
+ package_path = None
+
+ levelzero_importsfrom = (module for module, names, level in visitor.importsfrom
+ if level == 0)
+ for module in visitor.imports | set(levelzero_importsfrom):
+ if context and path:
+ module_basepath = os.path.join(package_path, module.replace('.', '/'))
+ if os.path.exists(module_basepath):
+ # Implicit relative import
+ yield context + '.' + module, path
+ continue
+
+ for suffix in suffixes:
+ if os.path.exists(module_basepath + suffix):
+ # Implicit relative import
+ yield context + '.' + module, path
+ break
+ else:
+ yield module, path
+ else:
+ yield module, path
+
+ for module, names, level in visitor.importsfrom:
+ if level == 0:
+ continue
+ elif not provide:
+ raise DependError("Error: ImportFrom non-zero level outside of a package: {0}".format((module, names, level)), path)
+ elif level > len(provide_elements):
+ raise DependError("Error: ImportFrom level exceeds package depth: {0}".format((module, names, level)), path)
+ else:
+ context = '.'.join(provide_elements[:-level])
+ if module:
+ if context:
+ yield context + '.' + module, path
+ else:
+ yield module, path
+
+
+def get_file_depends(path):
+ try:
+ code_string = open(path, 'r').read()
+ except (OSError, IOError) as exc:
+ raise DependError(path, exc)
+
+ return get_code_depends(code_string, path)
+
+
+def get_depends_recursive(directory):
+ directory = os.path.realpath(directory)
+
+ provides = dict((v, k) for k, v in get_provides(directory))
+ for filename, provide in provides.items():
+ if os.path.isdir(filename):
+ filename = os.path.join(filename, '__init__.py')
+ ispkg = True
+ elif not filename.endswith('.py'):
+ continue
+ else:
+ ispkg = False
+
+ with open(filename, 'r') as f:
+ source = f.read()
+
+ depends = get_code_depends(source, filename, provide, ispkg)
+ for depend, by in depends:
+ yield depend, by
+
+
+def get_depends(path):
+ if os.path.isdir(path):
+ return get_depends_recursive(path)
+ else:
+ return get_file_depends(path)
+
+
+def main():
+ logging.basicConfig()
+
+ parser = argparse.ArgumentParser(description='Determine dependencies and provided packages for python scripts/modules')
+ parser.add_argument('path', nargs='+', help='full path to content to be processed')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('-p', '--provides', action='store_true',
+ help='given a path, display the provided python modules')
+ group.add_argument('-d', '--depends', action='store_true',
+ help='given a filename, display the imported python modules')
+
+ args = parser.parse_args()
+ if args.provides:
+ modules = set()
+ for path in args.path:
+ for provide, fn in get_provides(path):
+ modules.add(provide)
+
+ for module in sorted(modules):
+ print(module)
+ elif args.depends:
+ for path in args.path:
+ try:
+ modules = get_depends(path)
+ except PythonDepError as exc:
+ logger.error(str(exc))
+ sys.exit(1)
+
+ for module, imp_by in modules:
+ print("{}\t{}".format(module, imp_by))
+ else:
+ parser.print_help()
+ sys.exit(2)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/poky/scripts/recipetool b/poky/scripts/recipetool
new file mode 100755
index 000000000..3a3c9b744
--- /dev/null
+++ b/poky/scripts/recipetool
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+
+# Recipe creation tool
+#
+# Copyright (C) 2014 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import sys
+import os
+import argparse
+import glob
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import argparse_oe
+logger = scriptutils.logger_create('recipetool')
+
+plugins = []
+
+def tinfoil_init(parserecipes):
+ import bb.tinfoil
+ import logging
+ tinfoil = bb.tinfoil.Tinfoil(tracking=True)
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(not parserecipes)
+ return tinfoil
+
+def main():
+
+ if not os.environ.get('BUILDDIR', ''):
+ logger.error("This script can only be run after initialising the build environment (e.g. by using oe-init-build-env)")
+ sys.exit(1)
+
+ parser = argparse_oe.ArgumentParser(description="OpenEmbedded recipe tool",
+ add_help=False,
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')
+
+ global_args, unparsed_args = parser.parse_known_args()
+
+ # Help is added here rather than via add_help=True, as we don't want it to
+ # be handled by parse_known_args()
+ parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
+ help='show this help message and exit')
+ subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ if global_args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif global_args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ import scriptpath
+ bitbakepath = scriptpath.add_bitbake_lib_path()
+ if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+ logger.debug('Found bitbake path: %s' % bitbakepath)
+ scriptpath.add_oe_lib_path()
+
+ scriptutils.logger_setup_color(logger, global_args.color)
+
+ tinfoil = tinfoil_init(False)
+ try:
+ for path in (tinfoil.config_data.getVar('BBPATH').split(':')
+ + [scripts_path]):
+ pluginpath = os.path.join(path, 'lib', 'recipetool')
+ scriptutils.load_plugins(logger, plugins, pluginpath)
+
+ registered = False
+ for plugin in plugins:
+ if hasattr(plugin, 'register_commands'):
+ registered = True
+ plugin.register_commands(subparsers)
+ elif hasattr(plugin, 'register_command'):
+ # Legacy function name
+ registered = True
+ plugin.register_command(subparsers)
+ if hasattr(plugin, 'tinfoil_init'):
+ plugin.tinfoil_init(tinfoil)
+
+ if not registered:
+ logger.error("No commands registered - missing plugins?")
+ sys.exit(1)
+
+ args = parser.parse_args(unparsed_args, namespace=global_args)
+
+ try:
+ if getattr(args, 'parserecipes', False):
+ tinfoil.config_data.disableTracking()
+ tinfoil.parseRecipes()
+ tinfoil.config_data.enableTracking()
+ ret = args.func(args)
+ except bb.BBHandledException:
+ ret = 1
+ finally:
+ tinfoil.shutdown()
+
+ return ret
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/relocate_sdk.py b/poky/scripts/relocate_sdk.py
new file mode 100755
index 000000000..c752fa2c6
--- /dev/null
+++ b/poky/scripts/relocate_sdk.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2012 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# DESCRIPTION
+# This script is called by the SDK installer script. It replaces the dynamic
+# loader path in all binaries and also fixes the SYSDIR paths/lengths and the
+# location of ld.so.cache in the dynamic loader binary
+#
+# AUTHORS
+# Laurentiu Palcu <laurentiu.palcu@intel.com>
+#
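+# Typical invocation, inferred from the argument handling below (paths
+# are illustrative only):
+#   relocate_sdk.py <new_prefix> <new_dl_path> <binary> [<binary> ...]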
+
+import struct
+import sys
+import stat
+import os
+import re
+import errno
+
+if sys.version < '3':
+ def b(x):
+ return x
+else:
+ def b(x):
+ return x.encode(sys.getfilesystemencoding())
+
+old_prefix = re.compile(b("##DEFAULT_INSTALL_DIR##"))
+
+def get_arch():
+ f.seek(0)
+ e_ident = f.read(16)
+ ei_mag0,ei_mag1_3,ei_class = struct.unpack("<B3sB11x", e_ident)
+
+ if ei_mag0 != 0x7f or ei_mag1_3 != b("ELF") or ei_class == 0:
+ return 0
+
+ if ei_class == 1:
+ return 32
+ elif ei_class == 2:
+ return 64
+
+def parse_elf_header():
+ global e_type, e_machine, e_version, e_entry, e_phoff, e_shoff, e_flags,\
+ e_ehsize, e_phentsize, e_phnum, e_shentsize, e_shnum, e_shstrndx
+
+ f.seek(0)
+ elf_header = f.read(64)
+
+ if arch == 32:
+ # 32bit
+ hdr_fmt = "<HHILLLIHHHHHH"
+ hdr_size = 52
+ else:
+ # 64bit
+ hdr_fmt = "<HHIQQQIHHHHHH"
+ hdr_size = 64
+
+ e_type, e_machine, e_version, e_entry, e_phoff, e_shoff, e_flags,\
+ e_ehsize, e_phentsize, e_phnum, e_shentsize, e_shnum, e_shstrndx =\
+ struct.unpack(hdr_fmt, elf_header[16:hdr_size])
+
+def change_interpreter(elf_file_name):
+ if arch == 32:
+ ph_fmt = "<IIIIIIII"
+ else:
+ ph_fmt = "<IIQQQQQQ"
+
+ """ look for PT_INTERP section """
+ for i in range(0,e_phnum):
+ f.seek(e_phoff + i * e_phentsize)
+ ph_hdr = f.read(e_phentsize)
+ if arch == 32:
+ # 32bit
+ p_type, p_offset, p_vaddr, p_paddr, p_filesz,\
+ p_memsz, p_flags, p_align = struct.unpack(ph_fmt, ph_hdr)
+ else:
+ # 64bit
+ p_type, p_flags, p_offset, p_vaddr, p_paddr, \
+ p_filesz, p_memsz, p_align = struct.unpack(ph_fmt, ph_hdr)
+
+ """ change interpreter """
+ if p_type == 3:
+ # PT_INTERP section
+ f.seek(p_offset)
+ # External SDKs with mixed pre-compiled binaries should not get
+ # relocated so look for some variant of /lib
+ fname = f.read(11)
+ if fname.startswith(b("/lib/")) or fname.startswith(b("/lib64/")) or \
+ fname.startswith(b("/lib32/")) or fname.startswith(b("/usr/lib/")) or \
+ fname.startswith(b("/usr/lib32/")) or fname.startswith(b("/usr/lib64/")):
+ break
+ if p_filesz == 0:
+ break
+ if (len(new_dl_path) >= p_filesz):
+ print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
+ % (elf_file_name, p_memsz, len(new_dl_path) + 1))
+ break
+ dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path))
+ f.seek(p_offset)
+ f.write(dl_path)
+ break
+
+def change_dl_sysdirs(elf_file_name):
+ if arch == 32:
+ sh_fmt = "<IIIIIIIIII"
+ else:
+ sh_fmt = "<IIQQQQIIQQ"
+
+ """ read section string table """
+ f.seek(e_shoff + e_shstrndx * e_shentsize)
+ sh_hdr = f.read(e_shentsize)
+ if arch == 32:
+ sh_offset, sh_size = struct.unpack("<16xII16x", sh_hdr)
+ else:
+ sh_offset, sh_size = struct.unpack("<24xQQ24x", sh_hdr)
+
+ f.seek(sh_offset)
+ sh_strtab = f.read(sh_size)
+
+ sysdirs = sysdirslen = ""
+
+ """ change ld.so.cache path and default libs path for dynamic loader """
+ for i in range(0,e_shnum):
+ f.seek(e_shoff + i * e_shentsize)
+ sh_hdr = f.read(e_shentsize)
+
+ sh_name, sh_type, sh_flags, sh_addr, sh_offset, sh_size, sh_link,\
+ sh_info, sh_addralign, sh_entsize = struct.unpack(sh_fmt, sh_hdr)
+
+ name = sh_strtab[sh_name:sh_strtab.find(b("\0"), sh_name)]
+
+ """ look only into SHT_PROGBITS sections """
+ if sh_type == 1:
+ f.seek(sh_offset)
+ """ default library paths cannot be changed on the fly because """
+ """ the string lengths have to be changed too. """
+ if name == b(".sysdirs"):
+ sysdirs = f.read(sh_size)
+ sysdirs_off = sh_offset
+ sysdirs_sect_size = sh_size
+ elif name == b(".sysdirslen"):
+ sysdirslen = f.read(sh_size)
+ sysdirslen_off = sh_offset
+ elif name == b(".ldsocache"):
+ ldsocache_path = f.read(sh_size)
+ new_ldsocache_path = old_prefix.sub(new_prefix, ldsocache_path)
+ new_ldsocache_path = new_ldsocache_path.rstrip(b("\0"))
+ if (len(new_ldsocache_path) >= sh_size):
+ print("ERROR: could not relocate %s, .ldsocache section size = %i and %i is needed." \
+ % (elf_file_name, sh_size, len(new_ldsocache_path)))
+ sys.exit(-1)
+ # pad with zeros
+ new_ldsocache_path += b("\0") * (sh_size - len(new_ldsocache_path))
+ # write it back
+ f.seek(sh_offset)
+ f.write(new_ldsocache_path)
+ elif name == b(".gccrelocprefix"):
+ offset = 0
+ while (offset + 4096) <= sh_size:
+ path = f.read(4096)
+ new_path = old_prefix.sub(new_prefix, path)
+ new_path = new_path.rstrip(b("\0"))
+ if (len(new_path) >= 4096):
+ print("ERROR: could not relocate %s, max path size = 4096 and %i is needed." \
+ % (elf_file_name, len(new_path)))
+ sys.exit(-1)
+ # pad with zeros
+ new_path += b("\0") * (4096 - len(new_path))
+ #print "Changing %s to %s at %s" % (str(path), str(new_path), str(offset))
+ # write it back
+ f.seek(sh_offset + offset)
+ f.write(new_path)
+ offset = offset + 4096
+ if sysdirs != "" and sysdirslen != "":
+ paths = sysdirs.split(b("\0"))
+ sysdirs = b("")
+ sysdirslen = b("")
+ for path in paths:
+ """ exit the loop when we encounter first empty string """
+ if path == b(""):
+ break
+
+ new_path = old_prefix.sub(new_prefix, path)
+ sysdirs += new_path + b("\0")
+
+ if arch == 32:
+ sysdirslen += struct.pack("<L", len(new_path))
+ else:
+ sysdirslen += struct.pack("<Q", len(new_path))
+
+ """ pad with zeros """
+ sysdirs += b("\0") * (sysdirs_sect_size - len(sysdirs))
+
+ """ write the sections back """
+ f.seek(sysdirs_off)
+ f.write(sysdirs)
+ f.seek(sysdirslen_off)
+ f.write(sysdirslen)
+
+# MAIN
+if len(sys.argv) < 4:
+ sys.exit(-1)
+
+# In python > 3, strings may also contain Unicode characters. So, convert
+# them to bytes
+if sys.version_info < (3,):
+ new_prefix = sys.argv[1]
+ new_dl_path = sys.argv[2]
+else:
+ new_prefix = sys.argv[1].encode()
+ new_dl_path = sys.argv[2].encode()
+
+executables_list = sys.argv[3:]
+
+for e in executables_list:
+ perms = os.stat(e)[stat.ST_MODE]
+ if os.access(e, os.W_OK|os.R_OK):
+ perms = None
+ else:
+ os.chmod(e, perms|stat.S_IRWXU)
+
+ try:
+ f = open(e, "r+b")
+ except IOError:
+ exctype, ioex = sys.exc_info()[:2]
+ if ioex.errno == errno.ETXTBSY:
+ print("Could not open %s. File used by another process.\nPlease "\
+ "make sure you exit all processes that might use any SDK "\
+ "binaries." % e)
+ else:
+ print("Could not open %s: %s(%d)" % (e, ioex.strerror, ioex.errno))
+ sys.exit(-1)
+
+ # Save old size and do a size check at the end. Just a safety measure.
+ old_size = os.path.getsize(e)
+ if old_size >= 64:
+ arch = get_arch()
+ if arch:
+ parse_elf_header()
+ change_interpreter(e)
+ change_dl_sysdirs(e)
+
+ """ change permissions back """
+ if perms:
+ os.chmod(e, perms)
+
+ f.close()
+
+ if old_size != os.path.getsize(e):
+ print("New file size for %s is different. Looks like a relocation error!", e)
+ sys.exit(-1)
+
diff --git a/poky/scripts/rootfs_rpm-extract-postinst.awk b/poky/scripts/rootfs_rpm-extract-postinst.awk
new file mode 100644
index 000000000..8f2836b32
--- /dev/null
+++ b/poky/scripts/rootfs_rpm-extract-postinst.awk
@@ -0,0 +1,11 @@
+/Name:.*/ {
+ package = substr($0, 7)
+ next
+}
+/postinstall.*scriptlet .*/ {
+ next
+}
+{
+ print $0 >> ENVIRON["D"] "/etc/rpm-postinsts/" package ".sh"
+}
+
diff --git a/poky/scripts/rpm2cpio.sh b/poky/scripts/rpm2cpio.sh
new file mode 100755
index 000000000..cf23472ba
--- /dev/null
+++ b/poky/scripts/rpm2cpio.sh
@@ -0,0 +1,55 @@
+#!/bin/sh -efu
+
+# This file comes from rpm 4.x distribution
+
+fatal() {
+ echo "$*" >&2
+ exit 1
+}
+
+pkg="$1"
+[ -n "$pkg" -a -e "$pkg" ] ||
+ fatal "No package supplied"
+
+_dd() {
+ local o="$1"; shift
+ dd if="$pkg" skip="$o" iflag=skip_bytes status=none $*
+}
+
+calcsize() {
+ offset=$(($1 + 8))
+
+ local i b b0 b1 b2 b3 b4 b5 b6 b7
+
+ i=0
+ while [ $i -lt 8 ]; do
+ b="$(_dd $(($offset + $i)) bs=1 count=1)"
+ [ -z "$b" ] &&
+ b="0" ||
+ b="$(exec printf '%u\n' "'$b")"
+ eval "b$i=\$b"
+ i=$(($i + 1))
+ done
+
+ rsize=$((8 + ((($b0 << 24) + ($b1 << 16) + ($b2 << 8) + $b3) << 4) + ($b4 << 24) + ($b5 << 16) + ($b6 << 8) + $b7))
+ offset=$(($offset + $rsize))
+}
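+# Example: if the 8 bytes read are 00 00 00 02 00 00 00 30, the section
+# holds 2 index entries and a 0x30-byte store, so
+# rsize = 8 + (2 << 4) + 0x30 = 88 bytes.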
+
+case "$(_dd 0 bs=8 count=1)" in
+ "$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb'
+ *) fatal "File doesn't look like rpm: $pkg" ;;
+esac
+
+calcsize 96
+sigsize=$rsize
+
+calcsize $(($offset + (8 - ($sigsize % 8)) % 8))
+hdrsize=$rsize
+
+case "$(_dd $offset bs=3 count=1)" in
+ "$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a'
+ "$(printf '\037\213')"*) _dd $offset | gunzip ;; # '\x1f\x8b'
+ "$(printf '\375\067')"*) _dd $offset | xzcat ;; # '\xfd\x37'
+ "$(printf '\135\000')"*) _dd $offset | unlzma ;; # '\x5d\x00'
+ *) fatal "Unrecognized rpm file: $pkg" ;;
+esac
diff --git a/poky/scripts/runqemu b/poky/scripts/runqemu
new file mode 100755
index 000000000..d99849406
--- /dev/null
+++ b/poky/scripts/runqemu
@@ -0,0 +1,1307 @@
+#!/usr/bin/env python3
+
+# Handle running OE images standalone with QEMU
+#
+# Copyright (C) 2006-2011 Linux Foundation
+# Copyright (c) 2016 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os
+import sys
+import logging
+import subprocess
+import re
+import fcntl
+import shutil
+import glob
+import configparser
+
+class RunQemuError(Exception):
+ """Custom exception to raise on known errors."""
+ pass
+
+class OEPathError(RunQemuError):
+ """Custom Exception to give better guidance on missing binaries"""
+ def __init__(self, message):
+ super().__init__("In order for this script to dynamically infer paths\n \
+to kernels or filesystem images, you either need bitbake in your PATH\n \
+or to source oe-init-build-env before running this script.\n\n \
+Dynamic path inference can be avoided by passing a *.qemuboot.conf to\n \
+runqemu, i.e. `runqemu /path/to/my-image-name.qemuboot.conf`\n\n %s" % message)
+
+
+def create_logger():
+ logger = logging.getLogger('runqemu')
+ logger.setLevel(logging.INFO)
+
+ # create console handler and set level to debug
+ ch = logging.StreamHandler()
+ ch.setLevel(logging.DEBUG)
+
+ # create formatter
+ formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
+
+ # add formatter to ch
+ ch.setFormatter(formatter)
+
+ # add ch to logger
+ logger.addHandler(ch)
+
+ return logger
+
+logger = create_logger()
+
+def print_usage():
+ print("""
+Usage: you can run this script with any valid combination
+of the following environment variables (in any order):
+ KERNEL - the kernel image file to use
+ ROOTFS - the rootfs image file or nfsroot directory to use
+ DEVICE_TREE - the device tree blob to use
+ MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
+ Simplified QEMU command-line options can be passed with:
+ nographic - disable video console
+ serial - enable a serial console on /dev/ttyS0
+ slirp - enable user networking, no root privileges required
+ kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
+ kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
+ publicvnc - enable a VNC server open to all hosts
+ audio - enable audio
+ [*/]ovmf* - OVMF firmware file or base name for booting with UEFI
+ tcpserial=<port> - specify tcp serial port number
+ biosdir=<dir> - specify custom bios dir
+ biosfilename=<filename> - specify bios filename
+ qemuparams=<xyz> - specify custom parameters to QEMU
+ bootparams=<xyz> - specify custom kernel parameters during boot
+ help, -h, --help: print this text
+ -d, --debug: Enable debug output
+ -q, --quiet: Hide most output except error messages
+
+Examples:
+ runqemu
+ runqemu qemuarm
+ runqemu tmp/deploy/images/qemuarm
+ runqemu tmp/deploy/images/qemux86/<qemuboot.conf>
+ runqemu qemux86-64 core-image-sato ext4
+ runqemu qemux86-64 wic-image-minimal wic
+ runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
+ runqemu qemux86 iso/hddimg/wic.vmdk/wic.qcow2/wic.vdi/ramfs/cpio.gz...
+ runqemu qemux86 qemuparams="-m 256"
+ runqemu qemux86 bootparams="psplash=false"
+ runqemu path/to/<image>-<machine>.wic
+ runqemu path/to/<image>-<machine>.wic.vmdk
+""")
+
+def check_tun():
+ """Check /dev/net/tun"""
+ dev_tun = '/dev/net/tun'
+ if not os.path.exists(dev_tun):
+ raise RunQemuError("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun)
+
+ if not os.access(dev_tun, os.W_OK):
+ raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
+
+def check_libgl(qemu_bin):
+ cmd = 'ldd %s' % qemu_bin
+ logger.debug('Running %s...' % cmd)
+ need_gl = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ if re.search('libGLU', need_gl):
+ # We can't run without a libGL.so
+ libgl = False
+ check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \
+ ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \
+ ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so'))
+
+ for (f1, f2) in check_files:
+ if re.search('\*', f1):
+ for g1 in glob.glob(f1):
+ if libgl:
+ break
+ if os.path.exists(g1):
+ for g2 in glob.glob(f2):
+ if os.path.exists(g2):
+ libgl = True
+ break
+ if libgl:
+ break
+ else:
+ if os.path.exists(f1) and os.path.exists(f2):
+ libgl = True
+ break
+ if not libgl:
+ logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.")
+ logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.")
+ logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.")
+ raise RunQemuError('%s requires libGLU, but not found' % qemu_bin)
+
+def get_first_file(cmds):
+ """Return first file found in wildcard cmds"""
+ for cmd in cmds:
+ all_files = glob.glob(cmd)
+ if all_files:
+ for f in all_files:
+ if not os.path.isdir(f):
+ return f
+ return ''
+
+def check_free_port(host, port):
+ """ Check whether the port is free or not """
+ import socket
+ from contextlib import closing
+
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+ if sock.connect_ex((host, port)) == 0:
+ # Port is open, so not free
+ return False
+ else:
+ # Port is not open, so free
+ return True
+
+class BaseConfig(object):
+ def __init__(self):
+ # self.d stores the vars saved via self.set(); some of them come from qemuboot.conf
+ self.d = {'QB_KERNEL_ROOT': '/dev/vda'}
+
+ # Supported env vars; add a var here if it can be taken from the
+ # environment, and don't use os.getenv elsewhere in the code.
+ self.env_vars = ('MACHINE',
+ 'ROOTFS',
+ 'KERNEL',
+ 'DEVICE_TREE',
+ 'DEPLOY_DIR_IMAGE',
+ 'OE_TMPDIR',
+ 'OECORE_NATIVE_SYSROOT',
+ )
+
+ self.qemu_opt = ''
+ self.qemu_opt_script = ''
+ self.clean_nfs_dir = False
+ self.nfs_server = ''
+ self.rootfs = ''
+ # File name(s) of an OVMF firmware file or variable store,
+ # to be added with -drive if=pflash.
+ # Found in the same places as the rootfs, with or without one of
+ # these suffixes: qcow2, bin.
+ # Setting one also adds "-vga std" because that is all that
+ # OVMF supports.
+ self.ovmf_bios = []
+ self.qemuboot = ''
+ self.qbconfload = False
+ self.kernel = ''
+ self.kernel_cmdline = ''
+ self.kernel_cmdline_script = ''
+ self.bootparams = ''
+ self.dtb = ''
+ self.fstype = ''
+ self.kvm_enabled = False
+ self.vhost_enabled = False
+ self.slirp_enabled = False
+ self.nfs_instance = 0
+ self.nfs_running = False
+ self.serialstdio = False
+ self.cleantap = False
+ self.saved_stty = ''
+ self.audio_enabled = False
+ self.tcpserial_portnum = ''
+ self.custombiosdir = ''
+ self.lock = ''
+ self.lock_descriptor = ''
+ self.bitbake_e = ''
+ self.snapshot = False
+ self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
+ 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
+ self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'wic.vmdk',
+ 'wic.qcow2', 'wic.vdi', 'iso')
+ self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
+ # Use different MAC prefixes for tap and slirp so the two modes can
+ # run at the same time without conflicts. The last octet is assigned
+ # dynamically so that multiple tap or slirp qemus can also coexist.
+ self.mac_tap = "52:54:00:12:34:"
+ self.mac_slirp = "52:54:00:12:35:"
+
+ def acquire_lock(self, error=True):
+ logger.debug("Acquiring lockfile %s..." % self.lock)
+ try:
+ self.lock_descriptor = open(self.lock, 'w')
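+ # LOCK_NB makes flock() fail immediately rather than block when
+ # another runqemu instance already holds the lock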
+ fcntl.flock(self.lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except Exception as e:
+ msg = "Acquiring lockfile %s failed: %s" % (self.lock, e)
+ if error:
+ logger.error(msg)
+ else:
+ logger.info(msg)
+ if self.lock_descriptor:
+ self.lock_descriptor.close()
+ return False
+ return True
+
+ def release_lock(self):
+ fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN)
+ self.lock_descriptor.close()
+ os.remove(self.lock)
+
+ def get(self, key):
+ if key in self.d:
+ return self.d.get(key)
+ elif os.getenv(key):
+ return os.getenv(key)
+ else:
+ return ''
+
+ def set(self, key, value):
+ self.d[key] = value
+
+ def is_deploy_dir_image(self, p):
+ if os.path.isdir(p):
+ if not re.search('.qemuboot.conf$', '\n'.join(os.listdir(p)), re.M):
+ logger.debug("Can't find required *.qemuboot.conf in %s" % p)
+ return False
+ if not any(map(lambda name: '-image-' in name, os.listdir(p))):
+ logger.debug("Can't find *-image-* in %s" % p)
+ return False
+ return True
+ else:
+ return False
+
+ def check_arg_fstype(self, fst):
+ """Check and set FSTYPE"""
+ if fst not in self.fstypes + self.vmtypes:
+ logger.warn("Maybe unsupported FSTYPE: %s" % fst)
+ if not self.fstype or self.fstype == fst:
+ if fst == 'ramfs':
+ fst = 'cpio.gz'
+ if fst in ('tar.bz2', 'tar.gz'):
+ fst = 'nfs'
+ self.fstype = fst
+ else:
+ raise RunQemuError("Conflicting: FSTYPE %s and %s" % (self.fstype, fst))
+
+ def set_machine_deploy_dir(self, machine, deploy_dir_image):
+ """Set MACHINE and DEPLOY_DIR_IMAGE"""
+ logger.debug('MACHINE: %s' % machine)
+ self.set("MACHINE", machine)
+ logger.debug('DEPLOY_DIR_IMAGE: %s' % deploy_dir_image)
+ self.set("DEPLOY_DIR_IMAGE", deploy_dir_image)
+
+ def check_arg_nfs(self, p):
+ if os.path.isdir(p):
+ self.rootfs = p
+ else:
+ m = re.match('(.*):(.*)', p)
+ self.nfs_server = m.group(1)
+ self.rootfs = m.group(2)
+ self.check_arg_fstype('nfs')
+
+ def check_arg_path(self, p):
+ """
+ - Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf
+ - Check whether it is a kernel file
+ - Check whether it is an image file
+ - Check whether it is an nfs dir
+ - Check whether it is an OVMF flash file
+ """
+ if p.endswith('.qemuboot.conf'):
+ self.qemuboot = p
+ self.qbconfload = True
+ elif re.search('\.bin$', p) or re.search('bzImage', p) or \
+ re.search('zImage', p) or re.search('vmlinux', p) or \
+ re.search('fitImage', p) or re.search('uImage', p):
+ self.kernel = p
+ elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
+ self.rootfs = p
+ # Check the filename against self.fstypes so that <file>.cpio.gz is
+ # handled correctly; otherwise its type would be detected as "gz".
+ fst = ""
+ for t in self.fstypes:
+ if p.endswith(t):
+ fst = t
+ break
+ if not fst:
+ m = re.search('.*\.(.*)$', self.rootfs)
+ if m:
+ fst = m.group(1)
+ if fst:
+ self.check_arg_fstype(fst)
+ qb = re.sub('\.' + fst + "$", '', self.rootfs)
+ qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf')
+ if os.path.exists(qb):
+ self.qemuboot = qb
+ self.qbconfload = True
+ else:
+ logger.warn("%s doesn't exist" % qb)
+ else:
+ raise RunQemuError("Can't find FSTYPE from: %s" % p)
+
+ elif os.path.isdir(p) or re.search(':', p) and re.search('/', p):
+ if self.is_deploy_dir_image(p):
+ logger.debug('DEPLOY_DIR_IMAGE: %s' % p)
+ self.set("DEPLOY_DIR_IMAGE", p)
+ else:
+ logger.debug("Assuming %s is an nfs rootfs" % p)
+ self.check_arg_nfs(p)
+ elif os.path.basename(p).startswith('ovmf'):
+ self.ovmf_bios.append(p)
+ else:
+ raise RunQemuError("Unknown path arg %s" % p)
+
+ def check_arg_machine(self, arg):
+ """Check whether it is a machine"""
+ if self.get('MACHINE') == arg:
+ return
+ elif self.get('MACHINE') and self.get('MACHINE') != arg:
+ raise RunQemuError("Maybe conflicted MACHINE: %s vs %s" % (self.get('MACHINE'), arg))
+ elif re.search('/', arg):
+ raise RunQemuError("Unknown arg: %s" % arg)
+
+ logger.debug('Assuming MACHINE = %s' % arg)
+
+ # if we're running under testimage, or similarly as a child
+ # of an existing bitbake invocation, we can't invoke bitbake
+ # to validate the MACHINE setting and must assume it's correct...
+ # FIXME: testimage.bbclass exports these two variables into env,
+ # are there other scenarios in which we need to support being
+ # invoked by bitbake?
+ deploy = self.get('DEPLOY_DIR_IMAGE')
+ bbchild = deploy and self.get('OE_TMPDIR')
+ if bbchild:
+ self.set_machine_deploy_dir(arg, deploy)
+ return
+ # also check whether we're running under a sourced toolchain
+ # environment file
+ if self.get('OECORE_NATIVE_SYSROOT'):
+ self.set("MACHINE", arg)
+ return
+
+ cmd = 'MACHINE=%s bitbake -e' % arg
+ logger.info('Running %s...' % cmd)
+ self.bitbake_e = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ # bitbake -e doesn't report invalid MACHINE as an error, so
+ # let's check DEPLOY_DIR_IMAGE to make sure that it is a valid
+ # MACHINE.
+ s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
+ if s:
+ deploy_dir_image = s.group(1)
+ else:
+ raise RunQemuError("bitbake -e %s" % self.bitbake_e)
+ if self.is_deploy_dir_image(deploy_dir_image):
+ self.set_machine_deploy_dir(arg, deploy_dir_image)
+ else:
+ logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image)
+ self.set("MACHINE", arg)
+
+ def check_args(self):
+ for debug in ("-d", "--debug"):
+ if debug in sys.argv:
+ logger.setLevel(logging.DEBUG)
+ sys.argv.remove(debug)
+
+ for quiet in ("-q", "--quiet"):
+ if quiet in sys.argv:
+ logger.setLevel(logging.ERROR)
+ sys.argv.remove(quiet)
+
+ unknown_arg = ""
+ for arg in sys.argv[1:]:
+ if arg in self.fstypes + self.vmtypes:
+ self.check_arg_fstype(arg)
+ elif arg == 'nographic':
+ self.qemu_opt_script += ' -nographic'
+ self.kernel_cmdline_script += ' console=ttyS0'
+ elif arg == 'serial':
+ self.kernel_cmdline_script += ' console=ttyS0'
+ self.serialstdio = True
+ elif arg == 'audio':
+ logger.info("Enabling audio in qemu")
+ logger.info("Please install sound drivers in linux host")
+ self.audio_enabled = True
+ elif arg == 'kvm':
+ self.kvm_enabled = True
+ elif arg == 'kvm-vhost':
+ self.vhost_enabled = True
+ elif arg == 'slirp':
+ self.slirp_enabled = True
+ elif arg == 'snapshot':
+ self.snapshot = True
+ elif arg == 'publicvnc':
+ self.qemu_opt_script += ' -vnc :0'
+ elif arg.startswith('tcpserial='):
+ self.tcpserial_portnum = arg[len('tcpserial='):]
+ elif arg.startswith('biosdir='):
+ self.custombiosdir = arg[len('biosdir='):]
+ elif arg.startswith('biosfilename='):
+ self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):]
+ elif arg.startswith('qemuparams='):
+ self.qemu_opt_script += ' %s' % arg[len('qemuparams='):]
+ elif arg.startswith('bootparams='):
+ self.bootparams = arg[len('bootparams='):]
+ elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)):
+ self.check_arg_path(os.path.abspath(arg))
+ elif re.search(r'-image-|-image$', arg):
+ # Lazy rootfs
+ self.rootfs = arg
+ elif arg.startswith('ovmf'):
+ self.ovmf_bios.append(arg)
+ else:
+ # At last, assume it is the MACHINE
+ if (not unknown_arg) or unknown_arg == arg:
+ unknown_arg = arg
+ else:
+ raise RunQemuError("Can't handle two unknown args: %s %s\n"
+ "Try 'runqemu help' on how to use it" % \
+ (unknown_arg, arg))
+ # Check to make sure it is a valid machine
+ if unknown_arg:
+ if self.get('MACHINE') == unknown_arg:
+ return
+ if self.get('DEPLOY_DIR_IMAGE'):
+ machine = os.path.basename(self.get('DEPLOY_DIR_IMAGE'))
+ if unknown_arg == machine:
+ self.set("MACHINE", machine)
+ return
+
+ self.check_arg_machine(unknown_arg)
+
+ if not (self.get('DEPLOY_DIR_IMAGE') or self.qbconfload):
+ self.load_bitbake_env()
+ s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
+ if s:
+ self.set("DEPLOY_DIR_IMAGE", s.group(1))
+
+ def check_kvm(self):
+ """Check kvm and kvm-host"""
+ if not (self.kvm_enabled or self.vhost_enabled):
+ self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'))
+ return
+
+ if not self.get('QB_CPU_KVM'):
+ raise RunQemuError("QB_CPU_KVM is NULL, this board doesn't support kvm")
+
+ self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'))
+ yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
+ yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
+ dev_kvm = '/dev/kvm'
+ dev_vhost = '/dev/vhost-net'
+ with open('/proc/cpuinfo', 'r') as f:
+ kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
+ if not kvm_cap:
+ logger.error("You are trying to enable KVM on a cpu without VT support.")
+ logger.error("Remove kvm from the command-line, or refer:")
+ raise RunQemuError(yocto_kvm_wiki)
+
+ if not os.path.exists(dev_kvm):
+ logger.error("Missing KVM device. Have you inserted kvm modules?")
+ logger.error("For further help see:")
+ raise RunQemuError(yocto_kvm_wiki)
+
+ if os.access(dev_kvm, os.W_OK|os.R_OK):
+ self.qemu_opt_script += ' -enable-kvm'
+ if self.get('MACHINE') == "qemux86":
+ # Workaround for broken APIC window on pre 4.15 host kernels which causes boot hangs
+ # See YOCTO #12301
+ # On 64 bit we use x2apic
+ self.kernel_cmdline_script += " clocksource=kvm-clock hpet=disable noapic nolapic"
+ else:
+ logger.error("You have no read or write permission on /dev/kvm.")
+ logger.error("Please change the ownership of this file as described at:")
+ raise RunQemuError(yocto_kvm_wiki)
+
+ if self.vhost_enabled:
+ if not os.path.exists(dev_vhost):
+ logger.error("Missing virtio net device. Have you inserted vhost-net module?")
+ logger.error("For further help see:")
+ raise RunQemuError(yocto_paravirt_kvm_wiki)
+
+ if not os.access(dev_vhost, os.W_OK|os.R_OK):
+ logger.error("You have no read or write permission on /dev/vhost-net.")
+ logger.error("Please change the ownership of this file as described at:")
+ raise RunQemuError(yocto_kvm_wiki)
+
+ def check_fstype(self):
+ """Check and setup FSTYPE"""
+ if not self.fstype:
+ fstype = self.get('QB_DEFAULT_FSTYPE')
+ if fstype:
+ self.fstype = fstype
+ else:
+ raise RunQemuError("FSTYPE is NULL!")
+
+ def check_rootfs(self):
+ """Check and set rootfs"""
+
+ if self.fstype == "none":
+ return
+
+ if self.get('ROOTFS'):
+ if not self.rootfs:
+ self.rootfs = self.get('ROOTFS')
+ elif self.get('ROOTFS') != self.rootfs:
+ raise RunQemuError("Maybe conflicted ROOTFS: %s vs %s" % (self.get('ROOTFS'), self.rootfs))
+
+ if self.fstype == 'nfs':
+ return
+
+ if self.rootfs and not os.path.exists(self.rootfs):
+ # Lazy rootfs
+ self.rootfs = "%s/%s-%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
+ self.rootfs, self.get('MACHINE'),
+ self.fstype)
+ elif not self.rootfs:
+ cmd_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
+ cmd_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
+ cmds = (cmd_name, cmd_link)
+ self.rootfs = get_first_file(cmds)
+ if not self.rootfs:
+ raise RunQemuError("Failed to find rootfs: %s or %s" % cmds)
+
+ if not os.path.exists(self.rootfs):
+ raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
+
+ def check_ovmf(self):
+ """Check and set full path for OVMF firmware and variable file(s)."""
+
+ for index, ovmf in enumerate(self.ovmf_bios):
+ if os.path.exists(ovmf):
+ continue
+ for suffix in ('qcow2', 'bin'):
+ path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)
+ if os.path.exists(path):
+ self.ovmf_bios[index] = path
+ break
+ else:
+ raise RunQemuError("Can't find OVMF firmware: %s" % ovmf)
+
+ def check_kernel(self):
+ """Check and set kernel"""
+ # The vm image doesn't need a kernel
+ if self.fstype in self.vmtypes:
+ return
+
+ # See if the user supplied a KERNEL option
+ if self.get('KERNEL'):
+ self.kernel = self.get('KERNEL')
+
+ # QB_DEFAULT_KERNEL is always a full file path
+ kernel_name = os.path.basename(self.get('QB_DEFAULT_KERNEL'))
+
+ # The user didn't want a kernel to be loaded
+ if kernel_name == "none" and not self.kernel:
+ return
+
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ if not self.kernel:
+ kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name)
+ kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
+ kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
+ cmds = (kernel_match_name, kernel_match_link, kernel_startswith)
+ self.kernel = get_first_file(cmds)
+ if not self.kernel:
+ raise RunQemuError('KERNEL not found: %s, %s or %s' % cmds)
+
+ if not os.path.exists(self.kernel):
+ raise RunQemuError("KERNEL %s not found" % self.kernel)
+
+ def check_dtb(self):
+ """Check and set dtb"""
+ # Did the user specify a device tree?
+ if self.get('DEVICE_TREE'):
+ self.dtb = self.get('DEVICE_TREE')
+ if not os.path.exists(self.dtb):
+ raise RunQemuError('Specified DTB not found: %s' % self.dtb)
+ return
+
+ dtb = self.get('QB_DTB')
+ if dtb:
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ cmd_match = "%s/%s" % (deploy_dir_image, dtb)
+ cmd_startswith = "%s/%s*" % (deploy_dir_image, dtb)
+ cmd_wild = "%s/*.dtb" % deploy_dir_image
+ cmds = (cmd_match, cmd_startswith, cmd_wild)
+ self.dtb = get_first_file(cmds)
+ if not os.path.exists(self.dtb):
+ raise RunQemuError('DTB not found: %s, %s or %s' % cmds)
+
+ def check_biosdir(self):
+ """Check custombiosdir"""
+ if not self.custombiosdir:
+ return
+
+ biosdir = ""
+ biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir)
+ biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir)
+ for i in (self.custombiosdir, biosdir_native, biosdir_host):
+ if os.path.isdir(i):
+ biosdir = i
+ break
+
+ if biosdir:
+ logger.debug("Assuming biosdir is: %s" % biosdir)
+ self.qemu_opt_script += ' -L %s' % biosdir
+ else:
+ logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host))
+ raise RunQemuError("Invalid custombiosdir: %s" % self.custombiosdir)
+
+ def check_mem(self):
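+        # Prefer an explicit "-m <size>" from the user-supplied qemu options;
+        # otherwise fall back to QB_MEM, defaulting to 512M.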
+ s = re.search('-m +([0-9]+)', self.qemu_opt_script)
+ if s:
+ self.set('QB_MEM', '-m %s' % s.group(1))
+ elif not self.get('QB_MEM'):
+            logger.info('QB_MEM is not set, using 512M by default')
+ self.set('QB_MEM', '-m 512')
+
+ self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'
+ self.qemu_opt_script += ' %s' % self.get('QB_MEM')
+
+ def check_tcpserial(self):
+ if self.tcpserial_portnum:
+ if self.get('QB_TCPSERIAL_OPT'):
+ self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum)
+ else:
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum
+
+ def check_and_set(self):
+ """Check configs sanity and set when needed"""
+ self.validate_paths()
+ if not self.slirp_enabled:
+ check_tun()
+ # Check audio
+ if self.audio_enabled:
+ if not self.get('QB_AUDIO_DRV'):
+ raise RunQemuError("QB_AUDIO_DRV is NULL, this board doesn't support audio")
+ if not self.get('QB_AUDIO_OPT'):
+                logger.warn('QB_AUDIO_OPT is NULL, you may need to define it to make audio work')
+ else:
+ self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT')
+ os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV'))
+ else:
+ os.putenv('QEMU_AUDIO_DRV', 'none')
+
+ self.check_kvm()
+ self.check_fstype()
+ self.check_rootfs()
+ self.check_ovmf()
+ self.check_kernel()
+ self.check_dtb()
+ self.check_biosdir()
+ self.check_mem()
+ self.check_tcpserial()
+
+ def read_qemuboot(self):
+ if not self.qemuboot:
+ if self.get('DEPLOY_DIR_IMAGE'):
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ else:
+ logger.warn("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
+ return
+
+ if self.rootfs and not os.path.exists(self.rootfs):
+ # Lazy rootfs
+ machine = self.get('MACHINE')
+ if not machine:
+ machine = os.path.basename(deploy_dir_image)
+ self.qemuboot = "%s/%s-%s.qemuboot.conf" % (deploy_dir_image,
+ self.rootfs, machine)
+ else:
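+                # Pick the most recently modified qemuboot.conf in the deploy dir.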
+ cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image
+ logger.debug('Running %s...' % cmd)
+ try:
+ qbs = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ raise RunQemuError(err)
+ if qbs:
+ for qb in qbs.split():
+                    # Don't use an initramfs image when there are other choices, unless fstype is cpio.gz
+ if '-initramfs-' in os.path.basename(qb) and self.fstype != 'cpio.gz':
+ continue
+ self.qemuboot = qb
+ break
+ if not self.qemuboot:
+                    # Use the first one when there is no other choice
+ self.qemuboot = qbs.split()[0]
+ self.qbconfload = True
+
+ if not self.qemuboot:
+ # If we haven't found a .qemuboot.conf at this point it probably
+ # doesn't exist, continue without
+ return
+
+ if not os.path.exists(self.qemuboot):
+ raise RunQemuError("Failed to find %s (wrong image name or BSP does not support running under qemu?)." % self.qemuboot)
+
+ logger.debug('CONFFILE: %s' % self.qemuboot)
+
+ cf = configparser.ConfigParser()
+ cf.read(self.qemuboot)
+ for k, v in cf.items('config_bsp'):
+ k_upper = k.upper()
+ if v.startswith("../"):
+ v = os.path.abspath(os.path.dirname(self.qemuboot) + "/" + v)
+ elif v == ".":
+ v = os.path.dirname(self.qemuboot)
+ self.set(k_upper, v)
+
+ def validate_paths(self):
+ """Ensure all relevant path variables are set"""
+ # When we're started with a *.qemuboot.conf arg assume that image
+ # artefacts are relative to that file, rather than in whatever
+ # directory DEPLOY_DIR_IMAGE in the conf file points to.
+ if self.qbconfload:
+ imgdir = os.path.realpath(os.path.dirname(self.qemuboot))
+ if imgdir != os.path.realpath(self.get('DEPLOY_DIR_IMAGE')):
+ logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir))
+ self.set('DEPLOY_DIR_IMAGE', imgdir)
+
+ # If the STAGING_*_NATIVE directories from the config file don't exist
+ # and we're in a sourced OE build directory try to extract the paths
+ # from `bitbake -e`
+ havenative = os.path.exists(self.get('STAGING_DIR_NATIVE')) and \
+ os.path.exists(self.get('STAGING_BINDIR_NATIVE'))
+
+ if not havenative:
+ if not self.bitbake_e:
+ self.load_bitbake_env()
+
+ if self.bitbake_e:
+ native_vars = ['STAGING_DIR_NATIVE']
+ for nv in native_vars:
+ s = re.search('^%s="(.*)"' % nv, self.bitbake_e, re.M)
+ if s and s.group(1) != self.get(nv):
+ logger.info('Overriding conf file setting of %s to %s from Bitbake environment' % (nv, s.group(1)))
+ self.set(nv, s.group(1))
+ else:
+                # When we're invoked from a running bitbake instance we won't
+                # be able to call `bitbake -e`, so instead:
+                # - get OE_TMPDIR from the environment and guess paths based on it
+                # - get OECORE_NATIVE_SYSROOT from the environment (for the SDK)
+ tmpdir = self.get('OE_TMPDIR')
+ oecore_native_sysroot = self.get('OECORE_NATIVE_SYSROOT')
+ if tmpdir:
+ logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir)
+ hostos, _, _, _, machine = os.uname()
+ buildsys = '%s-%s' % (machine, hostos.lower())
+ staging_dir_native = '%s/sysroots/%s' % (tmpdir, buildsys)
+ self.set('STAGING_DIR_NATIVE', staging_dir_native)
+ elif oecore_native_sysroot:
+ logger.info('Setting STAGING_DIR_NATIVE to OECORE_NATIVE_SYSROOT (%s)' % oecore_native_sysroot)
+ self.set('STAGING_DIR_NATIVE', oecore_native_sysroot)
+ if self.get('STAGING_DIR_NATIVE'):
+ # we have to assume that STAGING_BINDIR_NATIVE is at usr/bin
+ staging_bindir_native = '%s/usr/bin' % self.get('STAGING_DIR_NATIVE')
+ logger.info('Setting STAGING_BINDIR_NATIVE to %s' % staging_bindir_native)
+                self.set('STAGING_BINDIR_NATIVE', staging_bindir_native)
+
+ def print_config(self):
+ logger.info('Continuing with the following parameters:\n')
+        if self.fstype not in self.vmtypes:
+ print('KERNEL: [%s]' % self.kernel)
+ if self.dtb:
+ print('DTB: [%s]' % self.dtb)
+ print('MACHINE: [%s]' % self.get('MACHINE'))
+ print('FSTYPE: [%s]' % self.fstype)
+ if self.fstype == 'nfs':
+ print('NFS_DIR: [%s]' % self.rootfs)
+ else:
+ print('ROOTFS: [%s]' % self.rootfs)
+ if self.ovmf_bios:
+ print('OVMF: %s' % self.ovmf_bios)
+ print('CONFFILE: [%s]' % self.qemuboot)
+ print('')
+
+ def setup_nfs(self):
+ if not self.nfs_server:
+ if self.slirp_enabled:
+ self.nfs_server = '10.0.2.2'
+ else:
+ self.nfs_server = '192.168.7.1'
+
+ # Figure out a new nfs_instance to allow multiple qemus running.
+        # CentOS 7.1's ps doesn't print the full command line without "ww"
+        # when invoked by subprocess.Popen().
+ cmd = "ps auxww"
+ ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+        pattern = r'/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
+ all_instances = re.findall(pattern, ps, re.M)
+ if all_instances:
+ all_instances.sort(key=int)
+ self.nfs_instance = int(all_instances.pop()) + 1
+
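+        # Give each NFS instance its own nfsd/mountd port pair, spaced two
+        # apart, so that concurrent qemu instances don't collide.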
+ nfsd_port = 3049 + 2 * self.nfs_instance
+ mountd_port = 3048 + 2 * self.nfs_instance
+
+ # Export vars for runqemu-export-rootfs
+ export_dict = {
+ 'NFS_INSTANCE': self.nfs_instance,
+ 'NFSD_PORT': nfsd_port,
+ 'MOUNTD_PORT': mountd_port,
+ }
+ for k, v in export_dict.items():
+ # Use '%s' since they are integers
+ os.putenv(k, '%s' % v)
+
+ self.unfs_opts="nfsvers=3,port=%s,udp,mountport=%s" % (nfsd_port, mountd_port)
+
+        # Extract the .tar.bz2 or .tar.gz rootfs if there is no NFS directory
+ if not (self.rootfs and os.path.isdir(self.rootfs)):
+ src_prefix = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'))
+ dest = "%s-nfsroot" % src_prefix
+ if os.path.exists('%s.pseudo_state' % dest):
+                logger.info('Using %s as NFS_DIR' % dest)
+ self.rootfs = dest
+ else:
+ src = ""
+ src1 = '%s.tar.bz2' % src_prefix
+ src2 = '%s.tar.gz' % src_prefix
+ if os.path.exists(src1):
+ src = src1
+ elif os.path.exists(src2):
+ src = src2
+ if not src:
+ raise RunQemuError("No NFS_DIR is set, and can't find %s or %s to extract" % (src1, src2))
+ logger.info('NFS_DIR not found, extracting %s to %s' % (src, dest))
+ cmd = 'runqemu-extract-sdk %s %s' % (src, dest)
+ logger.info('Running %s...' % cmd)
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % cmd)
+ self.clean_nfs_dir = True
+ self.rootfs = dest
+
+ # Start the userspace NFS server
+ cmd = 'runqemu-export-rootfs start %s' % self.rootfs
+ logger.info('Running %s...' % cmd)
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % cmd)
+
+ self.nfs_running = True
+
+ def setup_slirp(self):
+ """Setup user networking"""
+
+ if self.fstype == 'nfs':
+ self.setup_nfs()
+ self.kernel_cmdline_script += ' ip=dhcp'
+ # Port mapping
+ hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
+ qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
+ qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default
+ # Figure out the port
+ ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)
+ ports = [int(i) for i in ports]
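+        # The last MAC octet starts at 2 and is bumped in step with the port
+        # search so concurrent slirp instances get distinct MAC addresses.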
+ mac = 2
+ # Find a free port to avoid conflicts
+ for p in ports[:]:
+ p_new = p
+ while not check_free_port('localhost', p_new):
+ p_new += 1
+ mac += 1
+ while p_new in ports:
+ p_new += 1
+ mac += 1
+ if p != p_new:
+ ports.append(p_new)
+ qb_slirp_opt = re.sub(':%s-' % p, ':%s-' % p_new, qb_slirp_opt)
+ logger.info("Port forward changed: %s -> %s" % (p, p_new))
+ mac = "%s%02x" % (self.mac_slirp, mac)
+ self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qb_slirp_opt))
+        # Print out port forwards
+ hostfwd = re.findall('(hostfwd=[^,]*)', qb_slirp_opt)
+ if hostfwd:
+ logger.info('Port forward: %s' % ' '.join(hostfwd))
+
+ def setup_tap(self):
+ """Setup tap"""
+
+ # This file is created when runqemu-gen-tapdevs creates a bank of tap
+ # devices, indicating that the user should not bring up new ones using
+ # sudo.
+ nosudo_flag = '/etc/runqemu-nosudo'
+ self.qemuifup = shutil.which('runqemu-ifup')
+ self.qemuifdown = shutil.which('runqemu-ifdown')
+ ip = shutil.which('ip')
+ lockdir = "/tmp/qemu-tap-locks"
+
+ if not (self.qemuifup and self.qemuifdown and ip):
+ logger.error("runqemu-ifup: %s" % self.qemuifup)
+ logger.error("runqemu-ifdown: %s" % self.qemuifdown)
+ logger.error("ip: %s" % ip)
+ raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
+
+ if not os.path.exists(lockdir):
+            # There might be a race when multiple runqemu processes are
+            # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+
+ cmd = '%s link' % ip
+ logger.debug('Running %s...' % cmd)
+ ip_link = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ # Matches line like: 6: tap0: <foo>
+ possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
+ tap = ""
+ for p in possibles:
+ lockfile = os.path.join(lockdir, p)
+ if os.path.exists('%s.skip' % lockfile):
+ logger.info('Found %s.skip, skipping %s' % (lockfile, p))
+ continue
+ self.lock = lockfile + '.lock'
+ if self.acquire_lock(error=False):
+ tap = p
+ logger.info("Using preconfigured tap device %s" % tap)
+ logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap))
+ break
+
+ if not tap:
+ if os.path.exists(nosudo_flag):
+ logger.error("Error: There are no available tap devices to use for networking,")
+ logger.error("and I see %s exists, so I am not going to try creating" % nosudo_flag)
+ raise RunQemuError("a new one with sudo.")
+
+ gid = os.getgid()
+ uid = os.getuid()
+ logger.info("Setting up tap interface under sudo")
+ cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.bindir_native)
+ tap = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8').rstrip('\n')
+ lockfile = os.path.join(lockdir, tap)
+ self.lock = lockfile + '.lock'
+ self.acquire_lock()
+ self.cleantap = True
+ logger.debug('Created tap: %s' % tap)
+
+ if not tap:
+ logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
+ return 1
+ self.tap = tap
+ tapnum = int(tap[3:])
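+        # tapN maps to the 192.168.7.0/24 address pair (2N+1, 2N+2): the odd
+        # address is the host/gateway side, the even one the guest.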
+ gateway = tapnum * 2 + 1
+ client = gateway + 1
+ if self.fstype == 'nfs':
+ self.setup_nfs()
+ netconf = "192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway)
+ logger.info("Network configuration: %s", netconf)
+ self.kernel_cmdline_script += " ip=%s" % netconf
+ mac = "%s%02x" % (self.mac_tap, client)
+ qb_tap_opt = self.get('QB_TAP_OPT')
+ if qb_tap_opt:
+ qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap)
+ else:
+ qemu_tap_opt = "-netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (self.tap)
+
+ if self.vhost_enabled:
+ qemu_tap_opt += ',vhost=on'
+
+ self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt))
+
+ def setup_network(self):
+ if self.get('QB_NET') == 'none':
+ return
+ if sys.stdin.isatty():
+ self.saved_stty = subprocess.check_output("stty -g", shell=True).decode('utf-8')
+ self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device
+ if self.slirp_enabled:
+ self.setup_slirp()
+ else:
+ self.setup_tap()
+
+ def setup_rootfs(self):
+ if self.get('QB_ROOTFS') == 'none':
+ return
+ if 'wic.' in self.fstype:
+ self.fstype = self.fstype[4:]
+ rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw'
+
+ qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
+ if qb_rootfs_opt:
+ self.rootfs_options = qb_rootfs_opt.replace('@ROOTFS@', self.rootfs)
+ else:
+ self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format)
+
+ if self.fstype in ('cpio.gz', 'cpio'):
+ self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
+ self.rootfs_options = '-initrd %s' % self.rootfs
+ else:
+ vm_drive = ''
+ if self.fstype in self.vmtypes:
+ if self.fstype == 'iso':
+ vm_drive = '-drive file=%s,if=virtio,media=cdrom' % self.rootfs
+ elif self.get('QB_DRIVE_TYPE'):
+ drive_type = self.get('QB_DRIVE_TYPE')
+ if drive_type.startswith("/dev/sd"):
+ logger.info('Using scsi drive')
+ vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \
+ % (self.rootfs, rootfs_format)
+ elif drive_type.startswith("/dev/hd"):
+ logger.info('Using ide drive')
+ vm_drive = "-drive file=%s,format=%s" % (self.rootfs, rootfs_format)
+ else:
+ # virtio might have been selected explicitly (just use it), or
+ # is used as fallback (then warn about that).
+ if not drive_type.startswith("/dev/vd"):
+ logger.warn("Unknown QB_DRIVE_TYPE: %s" % drive_type)
+ logger.warn("Failed to figure out drive type, consider define or fix QB_DRIVE_TYPE")
+ logger.warn('Trying to use virtio block drive')
+ vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format)
+
+            # Note: vm_drive stays empty when none of the drive options above applied.
+ self.rootfs_options = '%s -no-reboot' % vm_drive
+ self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT'))
+
+ if self.fstype == 'nfs':
+ self.rootfs_options = ''
+ k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, os.path.abspath(self.rootfs), self.unfs_opts)
+ self.kernel_cmdline = 'root=%s rw highres=off' % k_root
+
+ if self.fstype == 'none':
+ self.rootfs_options = ''
+
+ self.set('ROOTFS_OPTIONS', self.rootfs_options)
+
+ def guess_qb_system(self):
+ """attempt to determine the appropriate qemu-system binary"""
+ mach = self.get('MACHINE')
+ if not mach:
+            search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64el|qemumips64|qemumipsel|qemumips|qemuppc).*'
+ if self.rootfs:
+ match = re.match(search, self.rootfs)
+ if match:
+ mach = match.group(1)
+ elif self.kernel:
+ match = re.match(search, self.kernel)
+ if match:
+ mach = match.group(1)
+
+ if not mach:
+ return None
+
+ if mach == 'qemuarm':
+ qbsys = 'arm'
+ elif mach == 'qemuarm64':
+ qbsys = 'aarch64'
+ elif mach == 'qemux86':
+ qbsys = 'i386'
+ elif mach == 'qemux86-64':
+ qbsys = 'x86_64'
+ elif mach == 'qemuppc':
+ qbsys = 'ppc'
+ elif mach == 'qemumips':
+ qbsys = 'mips'
+ elif mach == 'qemumips64':
+ qbsys = 'mips64'
+ elif mach == 'qemumipsel':
+ qbsys = 'mipsel'
+ elif mach == 'qemumips64el':
+ qbsys = 'mips64el'
+ elif mach == 'qemuriscv64':
+ qbsys = 'riscv64'
+        elif mach == 'qemuriscv32':
+            qbsys = 'riscv32'
+        else:
+            # Unknown machine: let the caller raise a meaningful error
+            # instead of hitting a NameError on qbsys below.
+            return None
+
+ return 'qemu-system-%s' % qbsys
+
+ def setup_final(self):
+ qemu_system = self.get('QB_SYSTEM_NAME')
+ if not qemu_system:
+ qemu_system = self.guess_qb_system()
+ if not qemu_system:
+ raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
+
+ qemu_bin = '%s/%s' % (self.bindir_native, qemu_system)
+
+        # qemu-native may be in ASSUME_PROVIDED, in which case QEMU won't be
+        # found in the sysroot and the host's qemu must be used instead.
+ if not os.path.exists(qemu_bin):
+ logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin)
+            for path in os.environ.get('PATH', '').split(':'):
+ qemu_bin_tmp = os.path.join(path, qemu_system)
+ logger.info("Trying: %s" % qemu_bin_tmp)
+ if os.path.exists(qemu_bin_tmp):
+ qemu_bin = qemu_bin_tmp
+ if not os.path.isabs(qemu_bin):
+ qemu_bin = os.path.abspath(qemu_bin)
+ logger.info("Using host's QEMU: %s" % qemu_bin)
+ break
+
+ if not os.access(qemu_bin, os.X_OK):
+ raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
+
+ check_libgl(qemu_bin)
+
+ self.qemu_opt = "%s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+
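+        # Attach each OVMF image as a pflash drive; the file extension
+        # (qcow2/bin) doubles as the qemu "format" parameter.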
+ for ovmf in self.ovmf_bios:
+ format = ovmf.rsplit('.', 1)[-1]
+ self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
+ if self.ovmf_bios:
+ # OVMF only supports normal VGA, i.e. we need to override a -vga vmware
+ # that gets added for example for normal qemux86.
+ self.qemu_opt += ' -vga std'
+
+ self.qemu_opt += ' ' + self.qemu_opt_script
+
+ if self.snapshot:
+ self.qemu_opt += " -snapshot"
+
+ if self.serialstdio:
+ if sys.stdin.isatty():
+ subprocess.check_call("stty intr ^]", shell=True)
+ logger.info("Interrupt character is '^]'")
+
+ first_serial = ""
+ if not re.search("-nographic", self.qemu_opt):
+ first_serial = "-serial mon:vc"
+ # We always want a ttyS1. Since qemu by default adds a serial
+ # port when nodefaults is not specified, it seems that all that
+ # would be needed is to make sure a "-serial" is there. However,
+ # it appears that when "-serial" is specified, it ignores the
+ # default serial port that is normally added. So here we make
+ # sure to add two -serial if there are none. And only one if
+ # there is one -serial already.
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+ if serial_num == 0:
+ self.qemu_opt += " %s %s" % (first_serial, self.get("QB_SERIAL_OPT"))
+ elif serial_num == 1:
+ self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
+
+        # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES);
+        # if no serial or serialtcp option was specified, only ttyS0 is created
+        # and sysvinit shows an error trying to enable ttyS1:
+ # INIT: Id "S1" respawning too fast: disabled for 5 minutes
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+ if serial_num == 0:
+ if re.search("-nographic", self.qemu_opt):
+ self.qemu_opt += " -serial mon:stdio -serial null"
+ else:
+ self.qemu_opt += " -serial mon:vc -serial null"
+
+ def start_qemu(self):
+ if self.kernel:
+ kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline,
+ self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'),
+ self.bootparams)
+ if self.dtb:
+ kernel_opts += " -dtb %s" % self.dtb
+ else:
+ kernel_opts = ""
+ cmd = "%s %s" % (self.qemu_opt, kernel_opts)
+ logger.info('Running %s\n' % cmd)
+ process = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
+ if process.wait():
+ logger.error("Failed to run qemu: %s", process.stderr.read().decode())
+
+ def cleanup(self):
+ if self.cleantap:
+ cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.bindir_native)
+ logger.debug('Running %s' % cmd)
+ subprocess.check_call(cmd, shell=True)
+ if self.lock_descriptor:
+ logger.info("Releasing lockfile for tap device '%s'" % self.tap)
+ self.release_lock()
+
+ if self.nfs_running:
+ logger.info("Shutting down the userspace NFS server...")
+ cmd = "runqemu-export-rootfs stop %s" % self.rootfs
+ logger.debug('Running %s' % cmd)
+ subprocess.check_call(cmd, shell=True)
+
+ if self.saved_stty:
+ cmd = "stty %s" % self.saved_stty
+ subprocess.check_call(cmd, shell=True)
+
+ if self.clean_nfs_dir:
+ logger.info('Removing %s' % self.rootfs)
+ shutil.rmtree(self.rootfs)
+ shutil.rmtree('%s.pseudo_state' % self.rootfs)
+
+ def load_bitbake_env(self, mach=None):
+ if self.bitbake_e:
+ return
+
+ bitbake = shutil.which('bitbake')
+ if not bitbake:
+ return
+
+ if not mach:
+ mach = self.get('MACHINE')
+
+ if mach:
+ cmd = 'MACHINE=%s bitbake -e' % mach
+ else:
+ cmd = 'bitbake -e'
+
+ logger.info('Running %s...' % cmd)
+ try:
+ self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ self.bitbake_e = ''
+ logger.warn("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
+
+ def validate_combos(self):
+ if (self.fstype in self.vmtypes) and self.kernel:
+ raise RunQemuError("%s doesn't need kernel %s!" % (self.fstype, self.kernel))
+
+ @property
+ def bindir_native(self):
+ result = self.get('STAGING_BINDIR_NATIVE')
+ if result and os.path.exists(result):
+ return result
+
+ cmd = 'bitbake qemu-helper-native -e'
+ logger.info('Running %s...' % cmd)
+ out = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+ out = out.stdout.read().decode('utf-8')
+
+ match = re.search('^STAGING_BINDIR_NATIVE="(.*)"', out, re.M)
+ if match:
+ result = match.group(1)
+ if os.path.exists(result):
+ self.set('STAGING_BINDIR_NATIVE', result)
+ return result
+ raise RunQemuError("Native sysroot directory %s doesn't exist" % result)
+ else:
+ raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % cmd)
+
+
+def main():
+ if "help" in sys.argv or '-h' in sys.argv or '--help' in sys.argv:
+ print_usage()
+ return 0
+ try:
+ config = BaseConfig()
+ config.check_args()
+ config.read_qemuboot()
+ config.check_and_set()
+        # Check whether the combination of options is valid
+ config.validate_combos()
+ config.print_config()
+ config.setup_network()
+ config.setup_rootfs()
+ config.setup_final()
+ config.start_qemu()
+ except RunQemuError as err:
+ logger.error(err)
+ return 1
+ except Exception as err:
+ import traceback
+ traceback.print_exc()
+ return 1
+    finally:
+        print("Cleanup")
+        # config may not exist if BaseConfig() itself failed
+        if 'config' in locals():
+            config.cleanup()
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/poky/scripts/runqemu-addptable2image b/poky/scripts/runqemu-addptable2image
new file mode 100755
index 000000000..f0195ad8a
--- /dev/null
+++ b/poky/scripts/runqemu-addptable2image
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# Add a partition table to an ext2 image file
+#
+# Copyright (C) 2006-2007 OpenedHand Ltd.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+IMAGE=$1
+IMAGEOUT=$2
+
+echo $IMAGE
+echo $IMAGEOUT
+
+size=`ls -l $IMAGE | awk '{ print $5}'`
+size2=`expr $size / 512 / 16 / 63`
+
+echo $size
+echo $size2
+
+# MBR Size = 512 * 63 bytes
+dd if=/dev/zero of=$IMAGEOUT count=63
+
+echo "x" > /tmp/fdisk.cmds
+echo "c" >> /tmp/fdisk.cmds
+echo "1024" >> /tmp/fdisk.cmds
+echo "h" >> /tmp/fdisk.cmds
+echo "16" >> /tmp/fdisk.cmds
+echo "r" >> /tmp/fdisk.cmds
+echo "n" >> /tmp/fdisk.cmds
+echo "p" >> /tmp/fdisk.cmds
+echo "1" >> /tmp/fdisk.cmds
+echo "1" >> /tmp/fdisk.cmds
+echo "$size2" >> /tmp/fdisk.cmds
+echo "w" >> /tmp/fdisk.cmds
+
+/sbin/fdisk $IMAGEOUT < /tmp/fdisk.cmds
+cat $IMAGE >> $IMAGEOUT
diff --git a/poky/scripts/runqemu-export-rootfs b/poky/scripts/runqemu-export-rootfs
new file mode 100755
index 000000000..70cdcdbb1
--- /dev/null
+++ b/poky/scripts/runqemu-export-rootfs
@@ -0,0 +1,156 @@
+#!/bin/bash
+#
+# Copyright (c) 2005-2009 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+usage() {
+ echo "Usage: $0 {start|stop|restart} <nfs-export-dir>"
+}
+
+if [ $# != 2 ]; then
+ usage
+ exit 1
+fi
+
+if [[ "$1" != "start" && "$1" != "stop" && "$1" != "restart" ]]; then
+ echo "Unknown command '$1'"
+ usage
+ exit 1
+fi
+
+if [ ! -d "$2" ]; then
+ echo "Error: '$2' does not exist"
+ usage
+ exit 1
+fi
+# Ensure the nfs-export-dir is an absolute path
+NFS_EXPORT_DIR=$(cd "$2" && pwd)
+
+SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
+if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
+ echo "Error: Unable to find the oe-find-native-sysroot script"
+ echo "Did you forget to source your build environment setup script?"
+ exit 1
+fi
+. $SYSROOT_SETUP_SCRIPT meta-ide-support
+
+if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/unfsd" ]; then
+ echo "Error: Unable to find unfsd binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
+
+ if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
+ echo "Have you run 'bitbake meta-ide-support'?"
+ else
+ echo "This shouldn't happen - something is missing from your toolchain installation"
+ fi
+ exit 1
+fi
+
+if [ ! -d ~/.runqemu-sdk ]; then
+ mkdir -p ~/.runqemu-sdk
+fi
+
+NFS_INSTANCE=${NFS_INSTANCE:=0}
+EXPORTS=~/.runqemu-sdk/exports$NFS_INSTANCE
+RMTAB=~/.runqemu-sdk/rmtab$NFS_INSTANCE
+NFSPID=~/.runqemu-sdk/nfs$NFS_INSTANCE.pid
+MOUNTPID=~/.runqemu-sdk/mount$NFS_INSTANCE.pid
+
+PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
+PSEUDO_LOCALSTATEDIR="$NFS_EXPORT_DIR/../$(basename $NFS_EXPORT_DIR).pseudo_state"
+export PSEUDO_LOCALSTATEDIR
+
+if [ ! -d "$PSEUDO_LOCALSTATEDIR" ]; then
+ echo "Error: $PSEUDO_LOCALSTATEDIR does not exist."
+ echo "Did you create the export directory using runqemu-extract-sdk?"
+ exit 1
+fi
+
+# NFS server port number
+NFSD_PORT=${NFSD_PORT:=$[ 3049 + 2 * $NFS_INSTANCE ]}
+# mountd port number
+MOUNTD_PORT=${MOUNTD_PORT:=$[ 3048 + 2 * $NFS_INSTANCE ]}
+
+## For debugging you would additionally add
+## --debug all
+UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
+
+# See how we were called.
+case "$1" in
+ start)
+ PORTMAP_RUNNING=`ps -ef | grep portmap | grep -v grep`
+ RPCBIND_RUNNING=`ps -ef | grep rpcbind | grep -v grep`
+ if [[ "x$PORTMAP_RUNNING" = "x" && "x$RPCBIND_RUNNING" = "x" ]]; then
+ echo "======================================================="
+ echo "Error: neither rpcbind nor portmap appear to be running"
+ echo "Please install and start one of these services first"
+ echo "======================================================="
+ echo "Tip: for recent Ubuntu hosts, run:"
+ echo " sudo apt-get install rpcbind"
+ echo "Then add OPTIONS=\"-i -w\" to /etc/default/rpcbind and run"
+ echo " sudo service portmap restart"
+
+ exit 1
+ fi
+
+ echo "Creating exports file..."
+ echo "$NFS_EXPORT_DIR (rw,no_root_squash,no_all_squash,insecure)" > $EXPORTS
+
+ echo "Starting User Mode nfsd"
+ echo " $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/bin/unfsd $UNFSD_OPTS"
+ $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/bin/unfsd $UNFSD_OPTS
+ if [ ! $? = 0 ]; then
+ echo "Error starting nfsd"
+ exit 1
+ fi
+ # Check to make sure everything started ok.
+ if [ ! -f $NFSPID ]; then
+ echo "rpc.nfsd did not start correctly"
+ exit 1
+ fi
+ ps -fp `cat $NFSPID` > /dev/null 2> /dev/null
+ if [ ! $? = 0 ]; then
+ echo "rpc.nfsd did not start correctly"
+ exit 1
+ fi
+ echo " "
+ echo "On your target please remember to add the following options for NFS"
+ echo "nfsroot=IP_ADDRESS:$NFS_EXPORT_DIR,nfsvers=3,port=$NFSD_PORT,udp,mountport=$MOUNTD_PORT"
+ ;;
+ stop)
+ if [ -f "$NFSPID" ]; then
+ echo "Stopping rpc.nfsd"
+ kill `cat $NFSPID`
+ rm -f $NFSPID
+ else
+ echo "No PID file, not stopping rpc.nfsd"
+ fi
+ if [ -f "$EXPORTS" ]; then
+ echo "Removing exports file"
+ rm -f $EXPORTS
+ fi
+ ;;
+ restart)
+ $0 stop $NFS_EXPORT_DIR
+ $0 start $NFS_EXPORT_DIR
+ if [ ! $? = 0 ]; then
+ exit 1
+ fi
+ ;;
+ *)
+ echo "$0 {start|stop|restart} <nfs-export-dir>"
+ ;;
+esac
+
+exit 0
diff --git a/poky/scripts/runqemu-extract-sdk b/poky/scripts/runqemu-extract-sdk
new file mode 100755
index 000000000..2a0dd50e0
--- /dev/null
+++ b/poky/scripts/runqemu-extract-sdk
@@ -0,0 +1,104 @@
+#!/bin/bash
+#
+# This utility extracts an SDK image tarball using pseudo, and stores
+# the pseudo database in var/pseudo within the rootfs. If you want to
+# boot QEMU using an nfsroot, you *must* use this script to create the
+# rootfs to ensure it is done correctly with pseudo.
+#
+# Copyright (c) 2010 Intel Corp.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+function usage() {
+ echo "Usage: $0 <image-tarball> <extract-dir>"
+}
+
+if [ $# -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
+if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
+ echo "Error: Unable to find the oe-find-native-sysroot script"
+ echo "Did you forget to source your build system environment setup script?"
+ exit 1
+fi
+. $SYSROOT_SETUP_SCRIPT meta-ide-support
+PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
+
+ROOTFS_TARBALL=$1
+SDK_ROOTFS_DIR=$2
+
+if [ ! -e "$ROOTFS_TARBALL" ]; then
+ echo "Error: sdk tarball '$ROOTFS_TARBALL' does not exist"
+ usage
+ exit 1
+fi
+
+# Convert SDK_ROOTFS_DIR to a full pathname
+if [[ ${SDK_ROOTFS_DIR:0:1} != "/" ]]; then
+ SDK_ROOTFS_DIR=$(readlink -f $(pwd)/$SDK_ROOTFS_DIR)
+fi
+
+TAR_OPTS=""
+if [[ "$ROOTFS_TARBALL" =~ tar\.bz2$ ]]; then
+ TAR_OPTS="--numeric-owner -xjf"
+fi
+if [[ "$ROOTFS_TARBALL" =~ tar\.gz$ ]]; then
+ TAR_OPTS="--numeric-owner -xzf"
+fi
+if [[ "$ROOTFS_TARBALL" =~ \.tar$ ]]; then
+ TAR_OPTS="--numeric-owner -xf"
+fi
+if [ -z "$TAR_OPTS" ]; then
+ echo "Error: Unable to determine sdk tarball format"
+ echo "Accepted types: .tar / .tar.gz / .tar.bz2"
+ exit 1
+fi
+
+if [ ! -d "$SDK_ROOTFS_DIR" ]; then
+ echo "Creating directory $SDK_ROOTFS_DIR"
+ mkdir -p "$SDK_ROOTFS_DIR"
+fi
+
+pseudo_state_dir="$SDK_ROOTFS_DIR/../$(basename "$SDK_ROOTFS_DIR").pseudo_state"
+pseudo_state_dir="$(readlink -f $pseudo_state_dir)"
+
+if [ -e "$pseudo_state_dir" ]; then
+ echo "Error: $pseudo_state_dir already exists!"
+ echo "Please delete the rootfs tree and pseudo directory manually"
+ echo "if this is really what you want."
+ exit 1
+fi
+
+mkdir -p "$pseudo_state_dir"
+touch "$pseudo_state_dir/pseudo.pid"
+PSEUDO_LOCALSTATEDIR="$pseudo_state_dir"
+export PSEUDO_LOCALSTATEDIR
+
+echo "Extracting rootfs tarball using pseudo..."
+echo "$PSEUDO $PSEUDO_OPTS tar -C \"$SDK_ROOTFS_DIR\" $TAR_OPTS \"$ROOTFS_TARBALL\""
+$PSEUDO $PSEUDO_OPTS tar -C "$SDK_ROOTFS_DIR" $TAR_OPTS "$ROOTFS_TARBALL"
+
+DIRCHECK=`ls -l "$SDK_ROOTFS_DIR" | wc -l`
+if [ "$DIRCHECK" -lt 5 ]; then
+ echo "Warning: I don't see many files in $SDK_ROOTFS_DIR"
+ echo "Please double-check the extraction worked as intended"
+ exit 0
+fi
+
+echo "SDK image successfully extracted to $SDK_ROOTFS_DIR"
+
+exit 0
diff --git a/poky/scripts/runqemu-gen-tapdevs b/poky/scripts/runqemu-gen-tapdevs
new file mode 100755
index 000000000..11de318c1
--- /dev/null
+++ b/poky/scripts/runqemu-gen-tapdevs
@@ -0,0 +1,108 @@
+#!/bin/bash
+#
+# Create a "bank" of tap network devices that can be used by the
+# runqemu script. This script needs to be run as root, and will
+# use the tunctl binary from the build system sysroot. Note: many Linux
+# distros these days still use an older version of tunctl which does not
+# support the group permissions option, hence the need to use the build
+# system provided version.
+#
+# Copyright (C) 2010 Intel Corp.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+usage() {
+    echo "Usage: sudo $0 <uid> <gid> <num> <native-sysroot-basedir>"
+ echo "Where <uid> is the numeric user id the tap devices will be owned by"
+ echo "Where <gid> is the numeric group id the tap devices will be owned by"
+ echo "<num> is the number of tap devices to create (0 to remove all)"
+ echo "<native-sysroot-basedir> is the path to the build system's native sysroot"
+ echo "e.g. $ bitbake qemu-helper-native"
+ echo "$ sudo $0 1000 1000 4 tmp/sysroots-components/x86_64/qemu-helper-native/usr/bin"
+ exit 1
+}
+
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script must be run with root privileges"
+    exit 1
+fi
+
+if [ $# -ne 4 ]; then
+ echo "Error: Incorrect number of arguments"
+ usage
+fi
+
+TUID=$1
+GID=$2
+COUNT=$3
+STAGING_BINDIR_NATIVE=$4
+
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
+if [[ ! -x "$TUNCTL" || -d "$TUNCTL" ]]; then
+ echo "Error: $TUNCTL is not an executable"
+ usage
+fi
+
+SCRIPT_DIR=`dirname $0`
+RUNQEMU_IFUP="$SCRIPT_DIR/runqemu-ifup"
+if [ ! -x "$RUNQEMU_IFUP" ]; then
+ echo "Error: Unable to find the runqemu-ifup script in $SCRIPT_DIR"
+ exit 1
+fi
+
+IFCONFIG=`which ip 2> /dev/null`
+if [ -z "$IFCONFIG" ]; then
+ # Is it ever anywhere else?
+ IFCONFIG=/sbin/ip
+fi
+if [ ! -x "$IFCONFIG" ]; then
+ echo "$IFCONFIG cannot be executed"
+ exit 1
+fi
+
+if [ $COUNT -ge 0 ]; then
+ # Ensure we start with a clean slate
+ for tap in `$IFCONFIG link | grep tap | awk '{ print \$2 }' | sed s/://`; do
+ echo "Note: Destroying pre-existing tap interface $tap..."
+ $TUNCTL -d $tap
+ done
+ rm -f /etc/runqemu-nosudo
+else
+ echo "Error: Incorrect count: $COUNT"
+ exit 1
+fi
+
+if [ $COUNT -gt 0 ]; then
+ echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
+ for ((index=0; index < $COUNT; index++)); do
+ echo "Creating tap$index"
+ ifup=`$RUNQEMU_IFUP $TUID $GID $STAGING_BINDIR_NATIVE 2>&1`
+ if [ $? -ne 0 ]; then
+ echo "Error running tunctl: $ifup"
+ exit 1
+ fi
+ done
+
+ echo "Note: For systems running NetworkManager, it's recommended"
+ echo "Note: that the tap devices be set as unmanaged in the"
+ echo "Note: NetworkManager.conf file. Add the following lines to"
+ echo "Note: /etc/NetworkManager/NetworkManager.conf"
+ echo "[keyfile]"
+ echo "unmanaged-devices=interface-name:tap*"
+
+ # The runqemu script will check for this file, and if it exists,
+ # will use the existing bank of tap devices without creating
+ # additional ones via sudo.
+ touch /etc/runqemu-nosudo
+fi
diff --git a/poky/scripts/runqemu-ifdown b/poky/scripts/runqemu-ifdown
new file mode 100755
index 000000000..ffbc9de44
--- /dev/null
+++ b/poky/scripts/runqemu-ifdown
@@ -0,0 +1,66 @@
+#!/bin/bash
+#
+# QEMU network configuration script to bring down tap devices. This
+# utility needs to be run as root, and will use the tunctl binary
+# from the native sysroot.
+#
+# If you find yourself calling this script a lot, you can add the
+# the following to your /etc/sudoers file to be able to run this
+# command without entering your password each time:
+#
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifup
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifdown
+#
+# Copyright (c) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+usage() {
+ echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
+}
+
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script (runqemu-ifdown) must be run with root privileges"
+ exit 1
+fi
+
+if [ $# -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+TAP=$1
+STAGING_BINDIR_NATIVE=$2
+
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
+if [ ! -e "$TUNCTL" ]; then
+ echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
+ exit 1
+fi
+
+$TUNCTL -d $TAP
+
+# cleanup the remaining iptables rules
+IPTABLES=`which iptables 2> /dev/null`
+if [ "x$IPTABLES" = "x" ]; then
+ IPTABLES=/sbin/iptables
+fi
+if [ ! -x "$IPTABLES" ]; then
+ echo "$IPTABLES cannot be executed"
+ exit 1
+fi
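+# tapN was assigned the 192.168.7.0/24 address pair (2N+1, 2N+2) by
+# runqemu-ifup, so recompute both addresses from the tap index to
+# delete their NAT rules.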
+n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
+dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
+$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$n/32
+$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$dest/32
diff --git a/poky/scripts/runqemu-ifup b/poky/scripts/runqemu-ifup
new file mode 100755
index 000000000..59a15eaa2
--- /dev/null
+++ b/poky/scripts/runqemu-ifup
@@ -0,0 +1,121 @@
+#!/bin/bash
+#
+# QEMU network interface configuration script. This utility needs to
+# be run as root, and will use the tunctl binary from a native sysroot.
+# Note: many Linux distros these days still use an older version of
+# tunctl which does not support the group permissions option, hence
+# the need to use build system's version.
+#
+# If you find yourself calling this script a lot, you can add the
+# the following to your /etc/sudoers file to be able to run this
+# command without entering your password each time:
+#
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifup
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifdown
+#
+# If you'd like to create a bank of tap devices at once, you should use
+# the runqemu-gen-tapdevs script instead. If tap devices are set up using
+# that script, the runqemu script will never end up calling this
+# script.
+#
+# Copyright (c) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+usage() {
+ echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
+}
+
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script (runqemu-ifup) must be run with root privileges"
+ exit 1
+fi
+
+if [ $# -ne 3 ]; then
+ usage
+ exit 1
+fi
+
+USERID="-u $1"
+GROUP="-g $2"
+STAGING_BINDIR_NATIVE=$3
+
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
+if [ ! -x "$TUNCTL" ]; then
+ echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
+ exit 1
+fi
+
+TAP=`$TUNCTL -b $GROUP 2>&1`
+STATUS=$?
+if [ $STATUS -ne 0 ]; then
+# If tunctl -g fails, try using tunctl -u, for older host kernels
+# which do not support the TUNSETGROUP ioctl
+ TAP=`$TUNCTL -b $USERID 2>&1`
+ STATUS=$?
+ if [ $STATUS -ne 0 ]; then
+        echo "tunctl failed: $TAP"
+ exit 1
+ fi
+fi
+
+IFCONFIG=`which ip 2> /dev/null`
+if [ "x$IFCONFIG" = "x" ]; then
+ # better than nothing...
+ IFCONFIG=/sbin/ip
+fi
+if [ ! -x "$IFCONFIG" ]; then
+ echo "$IFCONFIG cannot be executed"
+ exit 1
+fi
+
+IPTABLES=`which iptables 2> /dev/null`
+if [ "x$IPTABLES" = "x" ]; then
+ IPTABLES=/sbin/iptables
+fi
+if [ ! -x "$IPTABLES" ]; then
+ echo "$IPTABLES cannot be executed"
+ exit 1
+fi
+
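+# tapN gets the 192.168.7.0/24 address pair (2N+1, 2N+2): the odd address
+# for the host side, the even one routed to the guest.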
+n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
+$IFCONFIG addr add 192.168.7.$n/32 broadcast 192.168.7.255 dev $TAP
+STATUS=$?
+if [ $STATUS -ne 0 ]; then
+ echo "Failed to set up IP addressing on $TAP"
+ exit 1
+fi
+$IFCONFIG link set dev $TAP up
+STATUS=$?
+if [ $STATUS -ne 0 ]; then
+ echo "Failed to bring up $TAP"
+ exit 1
+fi
+
+dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
+$IFCONFIG route add to 192.168.7.$dest dev $TAP
+STATUS=$?
+if [ $STATUS -ne 0 ]; then
+ echo "Failed to add route to 192.168.7.$dest using $TAP"
+ exit 1
+fi
+
+# setup NAT for the tap interface to have internet access in QEMU
+$IPTABLES -A POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$n/32
+$IPTABLES -A POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$dest/32
+echo 1 > /proc/sys/net/ipv4/ip_forward
+echo 1 > /proc/sys/net/ipv4/conf/$TAP/proxy_arp
+$IPTABLES -P FORWARD ACCEPT
+
+echo $TAP
diff --git a/poky/scripts/runqemu.README b/poky/scripts/runqemu.README
new file mode 100644
index 000000000..da9abd7df
--- /dev/null
+++ b/poky/scripts/runqemu.README
@@ -0,0 +1,42 @@
+Using OE images with QEMU
+=========================
+
+OE-Core can generate qemu bootable kernels and images which can be used
+on a desktop system. The scripts currently support booting ARM, MIPS, PowerPC
+and x86 (32 and 64 bit) images. The scripts can be used within the OE build
+system or externally.
+
+The runqemu script is run as:
+
+ runqemu <machine> <zimage> <filesystem>
+
+where:
+
+ <machine> is the machine/architecture to use (qemuarm/qemumips/qemuppc/qemux86/qemux86-64)
+ <zimage> is the path to a kernel (e.g. zimage-qemuarm.bin)
+ <filesystem> is the path to an ext2 image (e.g. filesystem-qemuarm.ext2) or an nfs directory
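+
+For example, a typical invocation might look like this (the file names are
+illustrative and depend on what you have built):
+
+  runqemu qemuarm zImage-qemuarm.bin core-image-minimal-qemuarm.ext2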
+
+If <machine> isn't specified, the script will try to detect the machine name
+from the name of the <zimage> file.
+
+If <filesystem> isn't specified, nfs booting will be assumed.
+
+When used within the build system, it will default to qemuarm, ext2 and the last kernel and
+core-image-sato-sdk image built by the build system. If an SDK image isn't present it will look
+for sato and minimal images.
+
+Full usage instructions can be seen by running the command with no options specified.
+
+
+Notes
+=====
+
+ - The scripts run qemu using sudo. Change permissions on /dev/net/tun to
+   run as non-root. The runqemu-gen-tapdevs script can also be used by
+   root to prepopulate the appropriate network devices.
+ - You can access the host computer at 192.168.7.1 within the image.
+ - Your qemu system will be accessible as 192.168.7.2.
+ - The script extracts the specified root filesystem under pseudo and sets up a userspace
+   NFS server to share the image over, meaning the filesystem can be accessed by
+   both the host and guest systems.
+
diff --git a/poky/scripts/send-error-report b/poky/scripts/send-error-report
new file mode 100755
index 000000000..15b5e8491
--- /dev/null
+++ b/poky/scripts/send-error-report
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+
+# Sends an error report (if the report-error class was enabled) to a
+# remote server.
+#
+# Copyright (C) 2013 Intel Corporation
+# Author: Andreea Proca <andreea.b.proca@intel.com>
+# Author: Michael Wood <michael.g.wood@intel.com>
+
+import urllib.request, urllib.error
+import sys
+import json
+import os
+import subprocess
+import argparse
+import logging
+
+scripts_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib')
+sys.path.insert(0, scripts_lib_path)
+import argparse_oe
+
+version = "0.3"
+
+log = logging.getLogger("send-error-report")
+logging.basicConfig(format='%(levelname)s: %(message)s')
+
+def getPayloadLimit(url):
+ req = urllib.request.Request(url, None)
+ try:
+ response = urllib.request.urlopen(req)
+ except urllib.error.URLError as e:
+ # Use this opportunity to bail out if we can't even contact the server
+ log.error("Could not contact server: " + url)
+ log.error(e.reason)
+ sys.exit(1)
+ try:
+ ret = json.loads(response.read())
+ max_log_size = ret.get('max_log_size', 0)
+ return int(max_log_size)
+ except:
+ pass
+
+ return 0
+
+def ask_for_contactdetails():
+ print("Please enter your name and your email (optionally), they'll be saved in the file you send.")
+ username = input("Name (required): ")
+ email = input("E-mail (not required): ")
+ return username, email
+
+def edit_content(json_file_path):
+ edit = input("Review information before sending? (y/n): ")
+ if 'y' in edit or 'Y' in edit:
+ editor = os.environ.get('EDITOR', None)
+ if editor:
+ subprocess.check_call([editor, json_file_path])
+ else:
+ log.error("Please set your EDITOR value")
+ sys.exit(1)
+ return True
+ return False
+
+def prepare_data(args):
+ # attempt to get the max_log_size from the server's settings
+ max_log_size = getPayloadLimit("http://"+args.server+"/ClientPost/JSON")
+
+ if not os.path.isfile(args.error_file):
+ log.error("No data file found.")
+ sys.exit(1)
+
+ home = os.path.expanduser("~")
+ userfile = os.path.join(home, ".oe-send-error")
+
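+    # ~/.oe-send-error caches the submitter's contact details from a previous
+    # run: the first line holds the name, the second the email address.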
+ try:
+ with open(userfile, 'r') as userfile_fp:
+ if len(args.name) == 0:
+ args.name = userfile_fp.readline()
+ else:
+ #use empty readline to increment the fp
+ userfile_fp.readline()
+
+ if len(args.email) == 0:
+ args.email = userfile_fp.readline()
+ except:
+ pass
+
+    if args.assume_yes and len(args.name) == 0:
+ log.error("Name needs to be provided either via "+userfile+" or as an argument (-n).")
+ sys.exit(1)
+
+    while len(args.name) <= 0 or len(args.name) > 50:
+        print("\nName needs to be given and must be no more than 50 characters.")
+ args.name, args.email = ask_for_contactdetails()
+
+ with open(userfile, 'w') as userfile_fp:
+ userfile_fp.write(args.name.strip() + "\n")
+ userfile_fp.write(args.email.strip() + "\n")
+
+ with open(args.error_file, 'r') as json_fp:
+ data = json_fp.read()
+
+ jsondata = json.loads(data)
+ jsondata['username'] = args.name.strip()
+ jsondata['email'] = args.email.strip()
+ jsondata['link_back'] = args.link_back.strip()
+ # If we got a max_log_size then use this to truncate to get the last
+ # max_log_size bytes from the end
+ if max_log_size != 0:
+ for fail in jsondata['failures']:
+ if len(fail['log']) > max_log_size:
+ print("Truncating log to allow for upload")
+ fail['log'] = fail['log'][-max_log_size:]
+
+ data = json.dumps(jsondata, indent=4, sort_keys=True)
+
+ # Write back the result which will contain all fields filled in and
+ # any post processing done on the log data
+ with open(args.error_file, "w") as json_fp:
+ if data:
+ json_fp.write(data)
+
+
+    if not args.assume_yes and edit_content(args.error_file):
+ #We'll need to re-read the content if we edited it
+ with open(args.error_file, 'r') as json_fp:
+ data = json_fp.read()
+
+ return data.encode('utf-8')
+
+
+def send_data(data, args):
+ headers={'Content-type': 'application/json', 'User-Agent': "send-error-report/"+version}
+
+ if args.json:
+ url = "http://"+args.server+"/ClientPost/JSON/"
+ else:
+ url = "http://"+args.server+"/ClientPost/"
+
+ req = urllib.request.Request(url, data=data, headers=headers)
+ try:
+ response = urllib.request.urlopen(req)
+ except urllib.error.HTTPError as e:
+ logging.error(e.reason)
+ sys.exit(1)
+
+ print(response.read())
+
+
+if __name__ == '__main__':
+    arg_parse = argparse_oe.ArgumentParser(description="This script sends an error report to the specified error-report-web server.")
+
+ arg_parse.add_argument("error_file",
+ help="Generated error report file location",
+ type=str)
+
+ arg_parse.add_argument("-y",
+ "--assume-yes",
+ help="Assume yes to all queries and do not prompt",
+ action="store_true")
+
+ arg_parse.add_argument("-s",
+ "--server",
+ help="Server to send error report to",
+ type=str,
+ default="errors.yoctoproject.org")
+
+ arg_parse.add_argument("-e",
+ "--email",
+ help="Email address to be used for contact",
+ type=str,
+ default="")
+
+ arg_parse.add_argument("-n",
+ "--name",
+ help="Submitter name used to identify your error report",
+ type=str,
+ default="")
+
+ arg_parse.add_argument("-l",
+ "--link-back",
+ help="A url to link back to this build from the error report server",
+ type=str,
+ default="")
+
+ arg_parse.add_argument("-j",
+ "--json",
+ help="Return the result in json format, silences all other output",
+ action="store_true")
+
+
+
+ args = arg_parse.parse_args()
+
+    if not args.json:
+ print("Preparing to send errors to: "+args.server)
+
+ data = prepare_data(args)
+ send_data(data, args)
+
+ sys.exit(0)
diff --git a/poky/scripts/send-pull-request b/poky/scripts/send-pull-request
new file mode 100755
index 000000000..883deacb0
--- /dev/null
+++ b/poky/scripts/send-pull-request
@@ -0,0 +1,184 @@
+#!/bin/bash
+#
+# Copyright (c) 2010-2011, Intel Corporation.
+# All Rights Reserved
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+#
+# This script is intended to be used to send a patch series prepared by the
+# create-pull-request script to Open Embedded and The Yocto Project, as well
+# as to related projects and layers.
+#
+
+AUTO=0
+AUTO_CL=0
+GITSOBCC="--suppress-cc=all"
+
+# Prevent environment leakage to these vars.
+unset TO
+unset CC
+unset AUTO_CC
+unset EXTRA_CC
+
+usage()
+{
+cat <<EOM
+Usage: $(basename $0) [-h] [-a] [-c] [[-t email]...] -p pull-dir
+ -a Send the cover letter to every recipient listed in Cc and
+ Signed-off-by lines found in the cover letter and the patches.
+ This option implies -c.
+ -c Expand the Cc list for the individual patches using the Cc and
+ Signed-off-by lines from the same patch.
+ -C Add extra CC to each email sent.
+ -p pull-dir Directory containing summary and patch files
+ -t email Explicitly add email to the recipients
+EOM
+}
+
+# Collect addresses from a patch into AUTO_CC
+# $1: a patch file
+harvest_recipients()
+{
+ PATCH=$1
+ export IFS=$',\n'
+ for REGX in "^[Cc][Cc]: *" "^[Ss]igned-[Oo]ff-[Bb]y: *"; do
+ for EMAIL in $(sed '/^---$/q' $PATCH | grep -e "$REGX" | sed "s/$REGX//"); do
+ if [ "${AUTO_CC/$EMAIL/}" == "$AUTO_CC" ] && [ -n "$EMAIL" ]; then
+ if [ -z "$AUTO_CC" ]; then
+ AUTO_CC=$EMAIL;
+ else
+ AUTO_CC="$AUTO_CC,$EMAIL";
+ fi
+ fi
+ done
+ done
+ unset IFS
+}
+
+# Parse and verify arguments
+while getopts "acC:hp:t:" OPT; do
+ case $OPT in
+ a)
+ AUTO=1
+ GITSOBCC="--signed-off-by-cc"
+ AUTO_CL=1
+ ;;
+ c)
+ AUTO=1
+ GITSOBCC="--signed-off-by-cc"
+ ;;
+ C)
+ EXTRA_CC="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ p)
+ PDIR=${OPTARG%/}
+ if [ ! -d $PDIR ]; then
+ echo "ERROR: pull-dir \"$PDIR\" does not exist."
+ usage
+ exit 1
+ fi
+ ;;
+ t)
+ if [ -n "$TO" ]; then
+ TO="$TO,$OPTARG"
+ else
+ TO="$OPTARG"
+ fi
+ ;;
+ esac
+done
+
+if [ -z "$PDIR" ]; then
+ echo "ERROR: you must specify a pull-dir."
+ usage
+ exit 1
+fi
+
+
+# Verify the cover letter is complete and free of tokens
+if [ -e $PDIR/0000-cover-letter.patch ]; then
+ CL="$PDIR/0000-cover-letter.patch"
+ for TOKEN in SUBJECT BLURB; do
+ grep -q "*** $TOKEN HERE ***" "$CL"
+ if [ $? -eq 0 ]; then
+ echo "ERROR: Please edit $CL and try again (Look for '*** $TOKEN HERE ***')."
+ exit 1
+ fi
+ done
+else
+ echo "WARNING: No cover letter will be sent."
+fi
+
+# Harvest emails from the generated patches and populate AUTO_CC.
+if [ $AUTO_CL -eq 1 ]; then
+ for PATCH in $PDIR/*.patch; do
+ harvest_recipients $PATCH
+ done
+fi
+
+AUTO_TO="$(git config sendemail.to)"
+if [ -n "$AUTO_TO" ]; then
+ if [ -n "$TO" ]; then
+ TO="$TO,$AUTO_TO"
+ else
+ TO="$AUTO_TO"
+ fi
+fi
+
+if [ -z "$TO" ] && [ -z "$AUTO_CC" ]; then
+ echo "ERROR: you have not specified any recipients."
+ usage
+ exit 1
+fi
+
+
+# Convert the collected addresses into git-send-email argument strings
+export IFS=$','
+GIT_TO=$(for R in $TO; do echo -n "--to='$R' "; done)
+GIT_CC=$(for R in $AUTO_CC; do echo -n "--cc='$R' "; done)
+GIT_EXTRA_CC=$(for R in $EXTRA_CC; do echo -n "--cc='$R' "; done)
+unset IFS
+
+# Handoff to git-send-email. It will perform the send confirmation.
+# Mail threading was already handled by git-format-patch in
+# create-pull-request, so we must not allow git-send-email to
+# add In-Reply-To and References headers again.
+PATCHES=$(echo $PDIR/*.patch)
+if [ $AUTO_CL -eq 1 ]; then
+ # Send the cover letter to every recipient, both specified as well as
+ # harvested. Then remove it from the patches list.
+ # --no-thread is redundant here (only sending a single message) and
+ # merely added for the sake of consistency.
+ eval "git send-email $GIT_TO $GIT_CC $GIT_EXTRA_CC --confirm=always --no-thread --suppress-cc=all $CL"
+ if [ $? -eq 1 ]; then
+ echo "ERROR: failed to send cover-letter with automatic recipients."
+ exit 1
+ fi
+ PATCHES=${PATCHES/"$CL"/}
+fi
+
+# Send the patch to the specified recipients and, if -c was specified, those git
+# finds in this specific patch.
+eval "git send-email $GIT_TO $GIT_EXTRA_CC --confirm=always --no-thread $GITSOBCC $PATCHES"
+if [ $? -eq 1 ]; then
+ echo "ERROR: failed to send patches."
+ exit 1
+fi
diff --git a/poky/scripts/sstate-cache-management.sh b/poky/scripts/sstate-cache-management.sh
new file mode 100755
index 000000000..2ab450ab5
--- /dev/null
+++ b/poky/scripts/sstate-cache-management.sh
@@ -0,0 +1,469 @@
+#!/bin/bash
+
+# Copyright (c) 2012 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+# Global vars
+cache_dir=
+confirm=
+fsym=
+total_deleted=0
+verbose=
+debug=0
+
+usage () {
+ cat << EOF
+Welcome to sstate cache management utilities.
+sstate-cache-management.sh <OPTION>
+
+Options:
+ -h, --help
+ Display this help and exit.
+
+ --cache-dir=<sstate cache dir>
+     Specify the sstate cache directory; the environment variable
+     SSTATE_CACHE_DIR is used if this option is not given.
+
+ --extra-archs=<arch1>,<arch2>...<archn>
+     Specify the list of architectures to be tested; this list is
+     extended with the native arch, allarch and the empty arch. With
+     this option the script does not try to generate the list of
+     available archs from AVAILTUNES in the tune files.
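+     For example (illustrative values, cf. the extra_archs note below):
+     --extra-archs=core2-64,cortexa9t2-vfp-neon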
+
+ --extra-layer=<layer1>,<layer2>...<layern>
+     Specify extra layers to be used when searching for archs; by
+     default the meta and meta-* layers in the top dir are searched,
+     and with this option meta, meta-*, <layer1>, <layer2>,
+     ...<layern> are searched. Use "," as the separator.
+
+     This option has no effect with --stamps-dir or --extra-archs.
+
+ -d, --remove-duplicated
+     Remove duplicated sstate cache files of a package, keeping only
+     the newest one. Duplicated sstate cache files of a package must
+     have the same arch; sstate cache files with different archs are
+     not considered duplicates.
+
+ Conflicts with --stamps-dir.
+
+ --stamps-dir=<dir1>,<dir2>...<dirn>
+     Specify the build directories' stamps directories; the sstate
+     cache files which ARE USED by these build directories will be
+     KEPT, and all other sstate cache files in cache-dir will be
+     removed. Use ","
+ as the separator. For example:
+ --stamps-dir=build1/tmp/stamps,build2/tmp/stamps
+
+ Conflicts with --remove-duplicated.
+
+ -L, --follow-symlink
+     Remove both the symbolic link and its destination file. Default: no.
+
+ -y, --yes
+ Automatic yes to prompts; assume "yes" as answer to all prompts
+ and run non-interactively.
+
+ -v, --verbose
+ Explain what is being done.
+
+ -D, --debug
+ Show debug info, repeat for more debug info.
+
+EOF
+}
+
+if [ $# -lt 1 ]; then
+ usage
+ exit 0
+fi
+
+# Echo no files to remove
+no_files () {
+ echo No files to remove
+}
+
+# Echo nothing to do
+do_nothing () {
+ echo Nothing to do
+}
+
+# Prompt for confirmation and read the answer ("y"/"n")
+read_confirm () {
+ echo "$total_deleted out of $total_files files will be removed! "
+ if [ "$confirm" != "y" ]; then
+ echo "Do you want to continue (y/n)? "
+ while read confirm; do
+ [ "$confirm" = "Y" -o "$confirm" = "y" -o "$confirm" = "n" \
+ -o "$confirm" = "N" ] && break
+ echo "Invalid input \"$confirm\", please input 'y' or 'n': "
+ done
+ else
+ echo
+ fi
+}
+
+# Print error information and exit.
+echo_error () {
+ echo "ERROR: $1" >&2
+ exit 1
+}
+
+# Generate the remove list:
+#
+# * Add .done/.siginfo to the remove list
+# * Add destination of symlink to the remove list
+#
+# $1: output file, others: sstate cache file (.tgz)
+gen_rmlist (){
+ local rmlist_file="$1"
+ shift
+ local files="$@"
+ for i in $files; do
+ echo $i >> $rmlist_file
+ # Add the ".siginfo"
+ if [ -e $i.siginfo ]; then
+ echo $i.siginfo >> $rmlist_file
+ fi
+ # Add the destination of symlink
+ if [ -L "$i" ]; then
+ if [ "$fsym" = "y" ]; then
+ dest="`readlink -e $i`"
+ if [ -n "$dest" ]; then
+ echo $dest >> $rmlist_file
+ # Remove the .siginfo when .tgz is removed
+ if [ -f "$dest.siginfo" ]; then
+ echo $dest.siginfo >> $rmlist_file
+ fi
+ fi
+ fi
+ # Add the ".tgz.done" and ".siginfo.done" (may exist in the future)
+ base_fn="${i##/*/}"
+ t_fn="$base_fn.done"
+ s_fn="$base_fn.siginfo.done"
+ for d in $t_fn $s_fn; do
+ if [ -f $cache_dir/$d ]; then
+ echo $cache_dir/$d >> $rmlist_file
+ fi
+ done
+ fi
+ done
+}
+
+# Remove the duplicated cache files for the pkg, keep the newest one
+remove_duplicated () {
+
+ local topdir
+ local oe_core_dir
+ local tunedirs
+ local all_archs
+ local all_machines
+ local ava_archs
+ local arch
+ local file_names
+ local sstate_files_list
+ local fn_tmp
+ local list_suffix=`mktemp` || exit 1
+
+ if [ -z "$extra_archs" ] ; then
+ # Find out the archs in all the layers
+ echo "Figuring out the archs in the layers ... "
+ oe_core_dir=$(dirname $(dirname $(readlink -e $0)))
+ topdir=$(dirname $oe_core_dir)
+ tunedirs="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/include' 2>/dev/null`"
+ [ -n "$tunedirs" ] || echo_error "Can't find the tune directory"
+ all_machines="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/*' -name '*.conf' 2>/dev/null | sed -e 's/.*\///' -e 's/.conf$//'`"
+ all_archs=`grep -r -h "^AVAILTUNES .*=" $tunedirs | sed -e 's/.*=//' -e 's/\"//g'`
+ fi
+
+	# Substitute "_" for "-", e.g. x86-64 becomes x86_64, but not for extra_archs which can be something like cortexa9t2-vfp-neon
+ # Sort to remove the duplicated ones
+ # Add allarch and builder arch (native)
+ builder_arch=$(uname -m)
+ all_archs="$(echo allarch $all_archs $all_machines $builder_arch \
+ | sed -e 's/-/_/g' -e 's/ /\n/g' | sort -u) $extra_archs"
+ echo "Done"
+
+ # Total number of files including sstate-, .siginfo and .done files
+ total_files=`find $cache_dir -name 'sstate*' | wc -l`
+ # Save all the sstate files in a file
+ sstate_files_list=`mktemp` || exit 1
+ find $cache_dir -name 'sstate:*:*:*:*:*:*:*.tgz*' >$sstate_files_list
+
+ echo "Figuring out the suffixes in the sstate cache dir ... "
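+	# The sed below strips everything up to and including the "_" that
+	# follows the hash, leaving just the task suffix, e.g. (illustrative)
+	# ..._<hash>_populate_sysroot.tgz yields "populate_sysroot".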
+ sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $sstate_files_list | sort -u`"
+ echo "Done"
+ echo "The following suffixes have been found in the cache dir:"
+ echo $sstate_suffixes
+
+ echo "Figuring out the archs in the sstate cache dir ... "
+	# With this SSTATE_PKGSPEC definition, the arch is the 6th colon-separated field:
+ # SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
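+	# An sstate file name therefore looks like (illustrative):
+	#   sstate:zlib:core2-64-poky-linux:1.2.11:r0:core2-64:3:<hash>_populate_sysroot.tgz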
+ for arch in $all_archs; do
+ grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
+ [ $? -eq 0 ] && ava_archs="$ava_archs $arch"
+ # ${builder_arch}_$arch used by toolchain sstate
+ grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:${builder_arch}_$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
+ [ $? -eq 0 ] && ava_archs="$ava_archs ${builder_arch}_$arch"
+ done
+ echo "Done"
+ echo "The following archs have been found in the cache dir:"
+ echo $ava_archs
+ echo ""
+
+ # Save the file list which needs to be removed
+ local remove_listdir=`mktemp -d` || exit 1
+ for suffix in $sstate_suffixes; do
+ if [ "$suffix" = "populate_lic" ] ; then
+ echo "Skipping populate_lic, because removing duplicates doesn't work correctly for them (use --stamps-dir instead)"
+ continue
+ fi
+ # Total number of files including .siginfo and .done files
+ total_files_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list | wc -l 2>/dev/null`
+ total_tgz_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz$" $sstate_files_list | wc -l 2>/dev/null`
+ # Save the file list to a file, some suffix's file may not exist
+ grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list >$list_suffix 2>/dev/null
+ local deleted_tgz=0
+ local deleted_files=0
+ for ext in tgz tgz.siginfo tgz.done; do
+ echo "Figuring out the sstate:xxx_$suffix.$ext ... "
+ # Uniq BPNs
+ file_names=`for arch in $ava_archs ""; do
+ sed -ne "s%.*/sstate:\([^:]*\):[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$%\1%p" $list_suffix
+ done | sort -u`
+
+ fn_tmp=`mktemp` || exit 1
+ rm_list="$remove_listdir/sstate:xxx_$suffix"
+ for fn in $file_names; do
+ [ -z "$verbose" ] || echo "Analyzing sstate:$fn-xxx_$suffix.${ext}"
+ for arch in $ava_archs ""; do
+ grep -h ".*/sstate:$fn:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$" $list_suffix >$fn_tmp
+ if [ -s $fn_tmp ] ; then
+ [ $debug -gt 1 ] && echo "Available files for $fn-$arch- with suffix $suffix.${ext}:" && cat $fn_tmp
+ # Use the modification time
+ to_del=$(ls -t $(cat $fn_tmp) | sed -n '1!p')
+ [ $debug -gt 2 ] && echo "Considering to delete: $to_del"
+ # The sstate file which is downloaded from the SSTATE_MIRROR is
+ # put in SSTATE_DIR, and there is a symlink in SSTATE_DIR/??/ to
+ # it, so filter it out from the remove list if it should not be
+ # removed.
+ to_keep=$(ls -t $(cat $fn_tmp) | sed -n '1p')
+ [ $debug -gt 2 ] && echo "Considering to keep: $to_keep"
+ for k in $to_keep; do
+ if [ -L "$k" ]; then
+ # The symlink's destination
+ k_dest="`readlink -e $k`"
+ # Maybe it is the one in cache_dir
+ k_maybe="$cache_dir/${k##/*/}"
+ # Remove it from the remove list if they are the same.
+ if [ "$k_dest" = "$k_maybe" ]; then
+ to_del="`echo $to_del | sed 's#'\"$k_maybe\"'##g'`"
+ fi
+ fi
+ done
+ rm -f $fn_tmp
+ [ $debug -gt 2 ] && echo "Decided to delete: $to_del"
+ gen_rmlist $rm_list.$ext "$to_del"
+ fi
+ done
+ done
+ done
+ deleted_tgz=`cat $rm_list.* 2>/dev/null | grep ".tgz$" | wc -l`
+ deleted_files=`cat $rm_list.* 2>/dev/null | wc -l`
+ [ "$deleted_files" -gt 0 -a $debug -gt 0 ] && cat $rm_list.*
+ echo "($deleted_tgz out of $total_tgz_suffix .tgz files for $suffix suffix will be removed or $deleted_files out of $total_files_suffix when counting also .siginfo and .done files)"
+ let total_deleted=$total_deleted+$deleted_files
+ done
+ deleted_tgz=0
+ rm_old_list=$remove_listdir/sstate-old-filenames
+ find $cache_dir -name 'sstate-*.tgz' >$rm_old_list
+ [ -s "$rm_old_list" ] && deleted_tgz=`cat $rm_old_list | grep ".tgz$" | wc -l`
+ [ -s "$rm_old_list" ] && deleted_files=`cat $rm_old_list | wc -l`
+ [ -s "$rm_old_list" -a $debug -gt 0 ] && cat $rm_old_list
+ echo "($deleted_tgz .tgz files with old sstate-* filenames will be removed or $deleted_files when counting also .siginfo and .done files)"
+ let total_deleted=$total_deleted+$deleted_files
+
+ rm -f $list_suffix
+ rm -f $sstate_files_list
+ if [ $total_deleted -gt 0 ]; then
+ read_confirm
+ if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
+ for list in `ls $remove_listdir/`; do
+			echo "Removing files listed in $list (`cat $remove_listdir/$list | wc -w` files) ... "
+ # Remove them one by one to avoid the argument list too long error
+ for i in `cat $remove_listdir/$list`; do
+ rm -f $verbose $i
+ done
+ echo "Done"
+ done
+ echo "$total_deleted files have been removed!"
+ else
+ do_nothing
+ fi
+ else
+ no_files
+ fi
+ [ -d $remove_listdir ] && rm -fr $remove_listdir
+}
+
+# Remove sstate files based on the stamps dirs: any sstate file not used by
+# the stamps dirs will be removed.
+rm_by_stamps (){
+
+ local cache_list=`mktemp` || exit 1
+ local keep_list=`mktemp` || exit 1
+ local rm_list=`mktemp` || exit 1
+ local sums
+ local all_sums
+
+ # Total number of files including sstate-, .siginfo and .done files
+ total_files=`find $cache_dir -type f -name 'sstate*' | wc -l`
+ # Save all the state file list to a file
+ find $cache_dir -type f -name 'sstate*' | sort -u -o $cache_list
+
+ echo "Figuring out the suffixes in the sstate cache dir ... "
+ local sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $cache_list | sort -u`"
+ echo "Done"
+ echo "The following suffixes have been found in the cache dir:"
+ echo $sstate_suffixes
+
+ # Figure out all the md5sums in the stamps dir.
+ echo "Figuring out all the md5sums in stamps dir ... "
+ for i in $sstate_suffixes; do
+		# There is no "\.sigdata" but "_setscene" when it is mirrored
+		# from the SSTATE_MIRRORS; use both to figure out the sums.
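+		# Matching stamp names look like (illustrative, sigdata form as in
+		# the sstate-diff-machines.sh examples):
+		#   3.4.3-r0.do_configure.sigdata.<sum>
+		#   3.4.3-r0.do_populate_sysroot_setscene.<sum>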
+ sums=`find $stamps -maxdepth 3 -name "*.do_$i.*" \
+ -o -name "*.do_${i}_setscene.*" | \
+ sed -ne 's#.*_setscene\.##p' -e 's#.*\.sigdata\.##p' | \
+ sed -e 's#\..*##' | sort -u`
+ all_sums="$all_sums $sums"
+ done
+ echo "Done"
+
+ echo "Figuring out the files which will be removed ... "
+ for i in $all_sums; do
+ grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:${i}_.*" $cache_list >>$keep_list
+ done
+ echo "Done"
+
+ if [ -s $keep_list ]; then
+ sort -u $keep_list -o $keep_list
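+		# comm -1 -3 prints only the lines unique to $cache_list, i.e. the
+		# cached files that no stamp in the stamps dirs refers to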
+ to_del=`comm -1 -3 $keep_list $cache_list`
+ gen_rmlist $rm_list "$to_del"
+ let total_deleted=`cat $rm_list | sort -u | wc -w`
+ if [ $total_deleted -gt 0 ]; then
+ [ $debug -gt 0 ] && cat $rm_list | sort -u
+ read_confirm
+ if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
+ echo "Removing sstate cache files ... ($total_deleted files)"
+ # Remove them one by one to avoid the argument list too long error
+ for i in `cat $rm_list | sort -u`; do
+ rm -f $verbose $i
+ done
+ echo "$total_deleted files have been removed"
+ else
+ do_nothing
+ fi
+ else
+ no_files
+ fi
+ else
+ echo_error "All files in cache dir will be removed! Abort!"
+ fi
+
+ rm -f $cache_list
+ rm -f $keep_list
+ rm -f $rm_list
+}
+
+# Parse arguments
+while [ -n "$1" ]; do
+ case $1 in
+ --cache-dir=*)
+ cache_dir=`echo $1 | sed -e 's#^--cache-dir=##' | xargs readlink -e`
+ [ -d "$cache_dir" ] || echo_error "Invalid argument to --cache-dir"
+ shift
+ ;;
+ --remove-duplicated|-d)
+ rm_duplicated="y"
+ shift
+ ;;
+ --yes|-y)
+ confirm="y"
+ shift
+ ;;
+ --follow-symlink|-L)
+ fsym="y"
+ shift
+ ;;
+ --extra-archs=*)
+ extra_archs=`echo $1 | sed -e 's#^--extra-archs=##' -e 's#,# #g'`
+ [ -n "$extra_archs" ] || echo_error "Invalid extra arch parameter"
+ shift
+ ;;
+ --extra-layer=*)
+ extra_layers=`echo $1 | sed -e 's#^--extra-layer=##' -e 's#,# #g'`
+ [ -n "$extra_layers" ] || echo_error "Invalid extra layer parameter"
+ for i in $extra_layers; do
+ l=`readlink -e $i`
+ if [ -d "$l" ]; then
+ layers="$layers $l"
+ else
+ echo_error "Can't find layer $i"
+ fi
+ done
+ shift
+ ;;
+ --stamps-dir=*)
+ stamps=`echo $1 | sed -e 's#^--stamps-dir=##' -e 's#,# #g'`
+ [ -n "$stamps" ] || echo_error "Invalid stamps dir $i"
+ for i in $stamps; do
+ [ -d "$i" ] || echo_error "Invalid stamps dir $i"
+ done
+ shift
+ ;;
+ --verbose|-v)
+ verbose="-v"
+ shift
+ ;;
+ --debug|-D)
+ debug=`expr $debug + 1`
+ echo "Debug level $debug"
+ shift
+ ;;
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "Invalid arguments $*"
+ echo_error "Try 'sstate-cache-management.sh -h' for more information."
+ ;;
+ esac
+done
+
+# sstate cache directory: if --cache-dir was not specified, fall back to the
+# environment variable SSTATE_CACHE_DIR; if that is not set either, error out.
+[ -n "$cache_dir" ] || cache_dir=$SSTATE_CACHE_DIR
+[ -n "$cache_dir" ] || echo_error "No cache dir found!"
+[ -d "$cache_dir" ] || echo_error "Invalid cache directory \"$cache_dir\""
+
+[ -n "$rm_duplicated" -a -n "$stamps" ] && \
+ echo_error "Can not use both --remove-duplicated and --stamps-dir"
+
+[ "$rm_duplicated" = "y" ] && remove_duplicated
+[ -n "$stamps" ] && rm_by_stamps
+[ -z "$rm_duplicated" -a -z "$stamps" ] && \
+ echo "What do you want to do?"
+exit 0
diff --git a/poky/scripts/sstate-diff-machines.sh b/poky/scripts/sstate-diff-machines.sh
new file mode 100755
index 000000000..27c6a3300
--- /dev/null
+++ b/poky/scripts/sstate-diff-machines.sh
@@ -0,0 +1,172 @@
+#!/bin/bash
+
+# Used to compare sstate checksums between MACHINES.
+# Execute script and compare generated list.M files.
+# Using bash to have PIPESTATUS variable.
+
+# It's also useful to keep older sstate checksums
+# to be able to find out why something is rebuilding
+# after updating metadata.
+
+# $ diff \
+# sstate-diff/1349348392/fake-cortexa8/list.M \
+# sstate-diff/1349348392/fake-cortexa9/list.M \
+# | wc -l
+# 538
+
+# Then to compare sigdata use something like:
+# $ ls sstate-diff/1349348392/*/armv7a-vfp-neon*/linux-libc-headers/*do_configure*sigdata*
+# sstate-diff/1349348392/fake-cortexa8/armv7a-vfp-neon-oe-linux-gnueabi/linux-libc-headers/3.4.3-r0.do_configure.sigdata.cb73b3630a7b8191e72fc469c5137025
+# sstate-diff/1349348392/fake-cortexa9/armv7a-vfp-neon-oe-linux-gnueabi/linux-libc-headers/3.4.3-r0.do_configure.sigdata.f37ada177bf99ce8af85914df22b5a0b
+# $ bitbake-diffsigs stamps.1349348392/*/armv7a-vfp-neon*/linux-libc-headers/*do_configure*sigdata*
+# basehash changed from 8d0bd67bb1da6f68717760fc3ef43171 to e869fa61426e88e9c30726ba88a1216a
+# Variable TUNE_CCARGS value changed from -march=armv7-a -mthumb-interwork -mfloat-abi=softfp -mfpu=neon -mtune=cortex-a8 to -march=armv7-a -mthumb-interwork -mfloat-abi=softfp -mfpu=neon -mtune=cortex-a9
+
+# Global vars
+tmpdir=
+machines=
+targets=
+default_machines="qemuarm qemux86 qemux86-64"
+default_targets="core-image-base"
+analyze="N"
+
+usage () {
+ cat << EOF
+Welcome to the utility for comparing sstate checksums between different MACHINEs.
+$0 <OPTION>
+
+Options:
+ -h, --help
+ Display this help and exit.
+
+ --tmpdir=<tmpdir>
+ Specify tmpdir, will use the environment variable TMPDIR if it is not specified.
+ Something like /OE/oe-core/tmp-eglibc (no / at the end).
+
+ --machines=<machines>
+ List of MACHINEs separated by space, will use the environment variable MACHINES if it is not specified.
+ Default value is "qemuarm qemux86 qemux86-64".
+
+ --targets=<targets>
+ List of targets separated by space, will use the environment variable TARGETS if it is not specified.
+ Default value is "core-image-base".
+
+ --analyze
+ Show the differences between MACHINEs. It assumes:
+     * The first 2 MACHINEs in the --machines parameter have the same TUNE_PKGARCH
+     * The third, optional MACHINE has a different TUNE_PKGARCH - only native and allarch recipes are compared
+     * Any further MACHINEs are ignored
+EOF
+}
+
+# Print error information and exit.
+echo_error () {
+ echo "ERROR: $1" >&2
+ exit 1
+}
+
+while [ -n "$1" ]; do
+ case $1 in
+ --tmpdir=*)
+ tmpdir=`echo $1 | sed -e 's#^--tmpdir=##' | xargs readlink -e`
+ [ -d "$tmpdir" ] || echo_error "Invalid argument to --tmpdir"
+ shift
+ ;;
+ --machines=*)
+ machines=`echo $1 | sed -e 's#^--machines="*\([^"]*\)"*#\1#'`
+ shift
+ ;;
+ --targets=*)
+ targets=`echo $1 | sed -e 's#^--targets="*\([^"]*\)"*#\1#'`
+ shift
+ ;;
+ --analyze)
+ analyze="Y"
+ shift
+ ;;
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "Invalid arguments $*"
+ echo_error "Try '$0 -h' for more information."
+ ;;
+ esac
+done
+
+# tmpdir: if --tmpdir was not specified, fall back to the environment
+# variable TMPDIR; if that is not set either, error out.
+[ -n "$tmpdir" ] || tmpdir=$TMPDIR
+[ -n "$tmpdir" ] || echo_error "No tmpdir found!"
+[ -d "$tmpdir" ] || echo_error "Invalid tmpdir \"$tmpdir\""
+[ -n "$machines" ] || machines=$MACHINES
+[ -n "$machines" ] || machines=$default_machines
+[ -n "$targets" ] || targets=$TARGETS
+[ -n "$targets" ] || targets=$default_targets
+
+OUTPUT=${tmpdir}/sstate-diff/`date "+%s"`
+declare -i RESULT=0
+
+for M in ${machines}; do
+ [ -d ${tmpdir}/stamps/ ] && find ${tmpdir}/stamps/ -name \*sigdata\* | xargs rm -f
+ mkdir -p ${OUTPUT}/${M}
+ export MACHINE=${M}
+ bitbake -S none ${targets} 2>&1 | tee -a ${OUTPUT}/${M}/log;
+ RESULT+=${PIPESTATUS[0]}
+ if ls ${tmpdir}/stamps/* >/dev/null 2>/dev/null ; then
+ cp -ra ${tmpdir}/stamps/* ${OUTPUT}/${M}
+ find ${OUTPUT}/${M} -name \*sigdata\* | sed "s#${OUTPUT}/${M}/##g" | sort > ${OUTPUT}/${M}/list
+ M_UNDERSCORE=`echo ${M} | sed 's/-/_/g'`
+ sed "s/^${M_UNDERSCORE}-/MACHINE/g" ${OUTPUT}/${M}/list | sort > ${OUTPUT}/${M}/list.M
+ find ${tmpdir}/stamps/ -name \*sigdata\* | xargs rm -f
+ else
+ printf "ERROR: no sigdata files were generated for MACHINE $M in ${tmpdir}/stamps\n";
+ fi
+done
+
+function compareSignatures() {
+ MACHINE1=$1
+ MACHINE2=$2
+ PATTERN="$3"
+ PRE_PATTERN=""
+ [ -n "${PATTERN}" ] || PRE_PATTERN="-v"
+ [ -n "${PATTERN}" ] || PATTERN="MACHINE"
+ for TASK in do_configure.sigdata do_populate_sysroot.sigdata do_package_write_ipk.sigdata; do
+ printf "\n\n === Comparing signatures for task ${TASK} between ${MACHINE1} and ${MACHINE2} ===\n" | tee -a ${OUTPUT}/signatures.${MACHINE2}.${TASK}.log
+ diff ${OUTPUT}/${MACHINE1}/list.M ${OUTPUT}/${MACHINE2}/list.M | grep ${PRE_PATTERN} "${PATTERN}" | grep ${TASK} > ${OUTPUT}/signatures.${MACHINE2}.${TASK}
+ for i in `cat ${OUTPUT}/signatures.${MACHINE2}.${TASK} | sed 's#[^/]*/\([^/]*\)/.*#\1#g' | sort -u | xargs`; do
+ [ -e ${OUTPUT}/${MACHINE1}/*/$i/*${TASK}* ] || echo "INFO: ${i} task ${TASK} doesn't exist in ${MACHINE1}" >&2
+ [ -e ${OUTPUT}/${MACHINE1}/*/$i/*${TASK}* ] || continue
+ [ -e ${OUTPUT}/${MACHINE2}/*/$i/*${TASK}* ] || echo "INFO: ${i} task ${TASK} doesn't exist in ${MACHINE2}" >&2
+ [ -e ${OUTPUT}/${MACHINE2}/*/$i/*${TASK}* ] || continue
+ printf "ERROR: $i different signature for task ${TASK} between ${MACHINE1} and ${MACHINE2}\n";
+ bitbake-diffsigs ${OUTPUT}/${MACHINE1}/*/$i/*${TASK}* ${OUTPUT}/${MACHINE2}/*/$i/*${TASK}*;
+ echo "$i" >> ${OUTPUT}/failed-recipes.log
+ echo
+ done | tee -a ${OUTPUT}/signatures.${MACHINE2}.${TASK}.log
+ # don't create empty files
+ ERRORS=`grep "^ERROR.*" ${OUTPUT}/signatures.${MACHINE2}.${TASK}.log | wc -l`
+ if [ "${ERRORS}" != "0" ] ; then
+ echo "ERROR: ${ERRORS} errors found in ${OUTPUT}/signatures.${MACHINE2}.${TASK}.log"
+ RESULT+=${ERRORS}
+ fi
+ done
+}
+
+function compareMachines() {
+ [ "$#" -ge 2 ] && compareSignatures $1 $2
+ [ "$#" -ge 3 ] && compareSignatures $1 $3 "\(^< all\)\|\(^< x86_64-linux\)\|\(^< i586-linux\)"
+}
+
+if [ "${analyze}" = "Y" ] ; then
+ compareMachines ${machines}
+fi
+
+if [ "${RESULT}" != "0" -a -f ${OUTPUT}/failed-recipes.log ] ; then
+ cat ${OUTPUT}/failed-recipes.log | sort -u >${OUTPUT}/failed-recipes.log.u && mv ${OUTPUT}/failed-recipes.log.u ${OUTPUT}/failed-recipes.log
+ echo "ERROR: ${RESULT} issues were found in these recipes: `cat ${OUTPUT}/failed-recipes.log | xargs`"
+fi
+
+echo "INFO: Output written in: ${OUTPUT}"
+exit ${RESULT}
diff --git a/poky/scripts/sstate-sysroot-cruft.sh b/poky/scripts/sstate-sysroot-cruft.sh
new file mode 100755
index 000000000..d9917f515
--- /dev/null
+++ b/poky/scripts/sstate-sysroot-cruft.sh
@@ -0,0 +1,199 @@
+#!/bin/sh
+
+# Used to find files installed in the sysroot which are not tracked by any sstate manifest
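+#
+# Example invocation (illustrative paths):
+#   sstate-sysroot-cruft.sh --tmpdir=/OE/oe-core/tmp-eglibc --whitelist=expected-cruft.txt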
+
+# Global vars
+tmpdir=
+
+usage () {
+ cat << EOF
+Welcome to sysroot cruft finding utility.
+$0 <OPTION>
+
+Options:
+ -h, --help
+ Display this help and exit.
+
+ --tmpdir=<tmpdir>
+ Specify tmpdir, will use the environment variable TMPDIR if it is not specified.
+ Something like /OE/oe-core/tmp-eglibc (no / at the end).
+
+ --whitelist=<whitelist-file>
+      Text file; each line is a regular expression for paths to ignore in the resulting diff.
+      You can use a diff file from the script output, if it contains only expected exceptions.
+      '#' is used as the regexp delimiter, so you don't need to escape forward slashes in paths.
+      ^ and $ are added automatically, so provide only the middle part.
+ Lines starting with '#' are ignored as comments.
+ All paths are relative to "sysroots" directory.
+ Directories don't end with forward slash.
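+
+      Example whitelist file (illustrative, mirrors the built-in patterns):
+        # ignore python caches anywhere in the sysroots
+        .*/__pycache__
+        .*\.pyc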
+EOF
+}
+
+# Print error information and exit.
+echo_error () {
+ echo "ERROR: $1" >&2
+ exit 1
+}
+
+while [ -n "$1" ]; do
+ case $1 in
+ --tmpdir=*)
+ tmpdir=`echo $1 | sed -e 's#^--tmpdir=##' | xargs readlink -e`
+ [ -d "$tmpdir" ] || echo_error "Invalid argument to --tmpdir"
+ shift
+ ;;
+ --whitelist=*)
+ fwhitelist=`echo $1 | sed -e 's#^--whitelist=##' | xargs readlink -e`
+ [ -f "$fwhitelist" ] || echo_error "Invalid argument to --whitelist"
+ shift
+ ;;
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "Invalid arguments $*"
+ echo_error "Try '$0 -h' for more information."
+ ;;
+ esac
+done
+
+# tmpdir: if --tmpdir was not specified, fall back to the environment
+# variable TMPDIR; if that is not set either, error out.
+[ -n "$tmpdir" ] || tmpdir=$TMPDIR
+[ -n "$tmpdir" ] || echo_error "No tmpdir found!"
+[ -d "$tmpdir" ] || echo_error "Invalid tmpdir \"$tmpdir\""
+
+OUTPUT=${tmpdir}/sysroot.cruft.`date "+%s"`
+
+# top level directories
+WHITELIST="[^/]*"
+
+# generated by base-passwd recipe
+WHITELIST="${WHITELIST} \
+ .*/etc/group-\? \
+ .*/etc/passwd-\? \
+"
+# generated by pseudo-native
+WHITELIST="${WHITELIST} \
+ .*/var/pseudo \
+ .*/var/pseudo/[^/]* \
+"
+
+# generated by package.bbclass:SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs"
+WHITELIST="${WHITELIST} \
+ .*/shlibs \
+ .*/pkgdata \
+"
+
+# generated by python
+WHITELIST="${WHITELIST} \
+ .*\.pyc \
+ .*\.pyo \
+ .*/__pycache__ \
+"
+
+# generated by lua
+WHITELIST="${WHITELIST} \
+ .*\.luac \
+"
+
+# generated by sgml-common-native
+WHITELIST="${WHITELIST} \
+ .*/etc/sgml/sgml-docbook.bak \
+"
+
+# generated by php
+WHITELIST="${WHITELIST} \
+ .*/usr/lib/php5/php/.channels \
+ .*/usr/lib/php5/php/.channels/.* \
+ .*/usr/lib/php5/php/.registry \
+ .*/usr/lib/php5/php/.registry/.* \
+ .*/usr/lib/php5/php/.depdb \
+ .*/usr/lib/php5/php/.depdblock \
+ .*/usr/lib/php5/php/.filemap \
+ .*/usr/lib/php5/php/.lock \
+"
+
+# generated by toolchain
+WHITELIST="${WHITELIST} \
+ [^/]*-tcbootstrap/lib \
+"
+
+# generated by useradd.bbclass
+WHITELIST="${WHITELIST} \
+ [^/]*/home \
+ [^/]*/home/xuser \
+ [^/]*/home/xuser/.bashrc \
+ [^/]*/home/xuser/.profile \
+ [^/]*/home/builder \
+ [^/]*/home/builder/.bashrc \
+ [^/]*/home/builder/.profile \
+"
+
+# generated by image.py for WIC
+# introduced in oe-core commit 861ce6c5d4836df1a783be3b01d2de56117c9863
+WHITELIST="${WHITELIST} \
+ [^/]*/imgdata \
+ [^/]*/imgdata/[^/]*\.env \
+"
+
+# generated by fontcache.bbclass
+WHITELIST="${WHITELIST} \
+ .*/var/cache/fontconfig/ \
+"
+
+# created by oe.utils.write_ld_so_conf which is used from few bbclasses and recipes:
+# meta/classes/image-prelink.bbclass: oe.utils.write_ld_so_conf(d)
+# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
+# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
+# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
+# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
+# introduced in oe-core commit 7fd1d7e639c2ed7e0699937a5cb245c187b7c811
+# and more visible since added to gobject-introspection in 10e0c1a3a452baa05d160a92a54b2e33cf0fd061
+WHITELIST="${WHITELIST} \
+ [^/]*/etc/ld.so.conf \
+"
+
+SYSROOTS="`readlink -f ${tmpdir}`/sysroots/"
+
+mkdir ${OUTPUT}
+find ${tmpdir}/sstate-control -name \*.populate-sysroot\* -o -name \*.populate_sysroot\* -o -name \*.package\* | xargs cat | grep sysroots | \
+ sed 's#/$##g; s#///*#/#g' | \
+ # work around for paths ending with / for directories and multiplied // (e.g. paths to native sysroot)
+ sort | sed "s#^${SYSROOTS}##g" > ${OUTPUT}/master.list.all.txt
+sort -u ${OUTPUT}/master.list.all.txt > ${OUTPUT}/master.list.txt # -u because some directories are listed for more recipes
+find ${tmpdir}/sysroots/ | \
+ sort | sed "s#^${SYSROOTS}##g" > ${OUTPUT}/sysroot.list.txt
+
+diff ${OUTPUT}/master.list.all.txt ${OUTPUT}/master.list.txt > ${OUTPUT}/duplicates.txt
+diff ${OUTPUT}/master.list.txt ${OUTPUT}/sysroot.list.txt > ${OUTPUT}/diff.all.txt
+
+grep "^> ." ${OUTPUT}/diff.all.txt | sed 's/^> //g' > ${OUTPUT}/diff.txt
+for item in ${WHITELIST}; do
+ sed -i "\\#^${item}\$#d" ${OUTPUT}/diff.txt;
+ echo "${item}" >> ${OUTPUT}/used.whitelist.txt
+done
+
+if [ -s "$fwhitelist" ] ; then
+ cat $fwhitelist >> ${OUTPUT}/used.whitelist.txt
+ cat $fwhitelist | grep -v '^#' | while read item; do
+ sed -i "\\#^${item}\$#d" ${OUTPUT}/diff.txt;
+ done
+fi
+# too many false positives for directories
+# echo "Following files are installed in sysroot at least twice"
+# cat ${OUTPUT}/duplicates
+
+RESULT=`cat ${OUTPUT}/diff.txt | wc -l`
+
+if [ "${RESULT}" != "0" ] ; then
+ echo "ERROR: ${RESULT} issues were found."
+ echo "ERROR: Following files are installed in sysroot, but not tracked by sstate:"
+ cat ${OUTPUT}/diff.txt
+else
+ echo "INFO: All files are tracked by sstate or were explicitly ignored by this script"
+fi
+
+echo "INFO: Output written in: ${OUTPUT}"
+exit ${RESULT}
diff --git a/poky/scripts/sysroot-relativelinks.py b/poky/scripts/sysroot-relativelinks.py
new file mode 100755
index 000000000..ffe254728
--- /dev/null
+++ b/poky/scripts/sysroot-relativelinks.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+import sys
+import os
+
+# Take a sysroot directory and turn all the absolute symlinks into
+# relative ones such that the sysroot is usable within another system.
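+#
+# Example (illustrative): a symlink usr/lib/libz.so -> /lib/libz.so.1 inside
+# the sysroot is rewritten to the relative target ../../lib/libz.so.1.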
+
+if len(sys.argv) != 2:
+    print("Usage is " + sys.argv[0] + " <directory>")
+ sys.exit(1)
+
+topdir = sys.argv[1]
+topdir = os.path.abspath(topdir)
+
+def handlelink(filep, subdir):
+ link = os.readlink(filep)
+ if link[0] != "/":
+ return
+ if link.startswith(topdir):
+ return
+ #print("Replacing %s with %s for %s" % (link, topdir+link, filep))
+ print("Replacing %s with %s for %s" % (link, os.path.relpath(topdir+link, subdir), filep))
+ os.unlink(filep)
+ os.symlink(os.path.relpath(topdir+link, subdir), filep)
+
+for subdir, dirs, files in os.walk(topdir):
+ for f in dirs + files:
+ filep = os.path.join(subdir, f)
+ if os.path.islink(filep):
+ #print("Considering %s" % filep)
+ handlelink(filep, subdir)
diff --git a/poky/scripts/task-time b/poky/scripts/task-time
new file mode 100755
index 000000000..e58040a9b
--- /dev/null
+++ b/poky/scripts/task-time
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+import re
+import sys
+
+arg_parser = argparse.ArgumentParser(
+ description="""
+Reports time consumed for one or more tasks in a format similar to the standard
+Bash 'time' builtin. Optionally sorts tasks by real (wall-clock), user (user
+space CPU), or sys (kernel CPU) time.
+""")
+
+arg_parser.add_argument(
+ "paths",
+ metavar="path",
+ nargs="+",
+ help="""
+A path containing task buildstats. If the path is a directory, e.g.
+build/tmp/buildstats, then all tasks found (recursively) in it will be
+processed. If the path is a single task buildstat, e.g.
+build/tmp/buildstats/20161018083535/foo-1.0-r0/do_compile, then just that
+buildstat will be processed. Multiple paths can be specified to process all of
+them. Files whose names do not start with "do_" are ignored.
+""")
+
+arg_parser.add_argument(
+ "--sort",
+ choices=("none", "real", "user", "sys"),
+ default="none",
+ help="""
+The measurement to sort the output by. Defaults to 'none', which means to sort
+by the order paths were given on the command line. For other options, tasks are
+sorted in descending order from the highest value.
+""")
+
+args = arg_parser.parse_args()
+
+# Field names and regexes for parsing out their values from buildstat files
+field_regexes = (("elapsed", ".*Elapsed time: ([0-9.]+)"),
+ ("user", "rusage ru_utime: ([0-9.]+)"),
+ ("sys", "rusage ru_stime: ([0-9.]+)"),
+ ("child user", "Child rusage ru_utime: ([0-9.]+)"),
+ ("child sys", "Child rusage ru_stime: ([0-9.]+)"))
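+
+# The buildstat lines these regexes match look roughly like (illustrative
+# values):
+#   Elapsed time: 123.45 seconds
+#   rusage ru_utime: 1.23
+#   rusage ru_stime: 0.45
+#   Child rusage ru_utime: 100.00
+#   Child rusage ru_stime: 5.67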
+
+# A list of (<path>, <dict>) tuples, where <path> is the path of a do_* task
+# buildstat file and <dict> maps fields from the file to their values
+task_infos = []
+
+def save_times_for_task(path):
+ """Saves information for the buildstat file 'path' in 'task_infos'."""
+
+ if not os.path.basename(path).startswith("do_"):
+ return
+
+ with open(path) as f:
+ fields = {}
+
+ for line in f:
+ for name, regex in field_regexes:
+ match = re.match(regex, line)
+ if match:
+ fields[name] = float(match.group(1))
+ break
+
+ # Check that all expected fields were present
+ for name, regex in field_regexes:
+ if name not in fields:
+ print("Warning: Skipping '{}' because no field matching '{}' could be found"
+ .format(path, regex),
+ file=sys.stderr)
+ return
+
+ task_infos.append((path, fields))
+
+def save_times_for_dir(path):
+ """Runs save_times_for_task() for each file in path and its subdirs, recursively."""
+
+ # Raise an exception for os.walk() errors instead of ignoring them
+ def walk_onerror(e):
+ raise e
+
+ for root, _, files in os.walk(path, onerror=walk_onerror):
+ for fname in files:
+ save_times_for_task(os.path.join(root, fname))
+
+for path in args.paths:
+ if os.path.isfile(path):
+ save_times_for_task(path)
+ else:
+ save_times_for_dir(path)
+
+def elapsed_time(task_info):
+ return task_info[1]["elapsed"]
+
+def tot_user_time(task_info):
+ return task_info[1]["user"] + task_info[1]["child user"]
+
+def tot_sys_time(task_info):
+ return task_info[1]["sys"] + task_info[1]["child sys"]
+
+if args.sort != "none":
+ sort_fn = {"real": elapsed_time, "user": tot_user_time, "sys": tot_sys_time}
+ task_infos.sort(key=sort_fn[args.sort], reverse=True)
+
+first_entry = True
+
+# Catching BrokenPipeError avoids annoying errors when the output is piped into
+# e.g. 'less' or 'head' and not completely read
+try:
+ for task_info in task_infos:
+ real = elapsed_time(task_info)
+ user = tot_user_time(task_info)
+ sys = tot_sys_time(task_info)
+
+ if not first_entry:
+ print()
+ first_entry = False
+
+ # Mimic Bash's 'time' builtin
+ print("{}:\n"
+ "real\t{}m{:.3f}s\n"
+ "user\t{}m{:.3f}s\n"
+ "sys\t{}m{:.3f}s"
+ .format(task_info[0],
+ int(real//60), real%60,
+ int(user//60), user%60,
+ int(sys//60), sys%60))
+
+except BrokenPipeError:
+ pass
diff --git a/poky/scripts/test-reexec b/poky/scripts/test-reexec
new file mode 100755
index 000000000..30e792c7d
--- /dev/null
+++ b/poky/scripts/test-reexec
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+# Test Script for task re-execution
+#
+# Copyright 2012 Intel Corporation
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# DESCRIPTION
+# This script is intended to address issues for re-execution of
+# tasks. The test results are saved in ./reexeclogs. Force build
+# logs are saved with prefix "force". Build failure logs are saved with
+# prefix "failed". Log files with prefix "initial" are used to save
+# initial build logs for each recipe. Log files with prefix "clean" are
+# used to save logs of clean task after testing for a recipe is finished.
+#
+
+targets=`bitbake -s | cut -d " " -f 1`
+
+LOGS=./reexeclogs
+
+mkdir -p $LOGS
+
+# Clear sstate files for specified recipe
+function clearsstate {
+ target=$1
+
+ sstate_dir=`bitbake $target -e | grep "^SSTATE_DIR=" | cut -d "\"" -f 2`
+ sstate_pkgspec=`bitbake $target -e | grep "^SSTATE_PKGSPEC=" | cut -d "\"" -f 2`
+ sstasks=`bitbake $target -e | grep "^SSTATETASKS=" | cut -d "\"" -f 2`
+
+ for sstask in $sstasks
+ do
+ sstask=${sstask:3}
+ case $sstask in
+ populate_sysroot) sstask="populate-sysroot"
+ ;;
+ populate_lic) sstask="populate-lic"
+ ;;
+ package_write_ipk) sstask="deploy-ipk"
+ ;;
+ package_write_deb) sstask="deploy-deb"
+ ;;
+ package_write_rpm) sstask="deploy-rpm"
+ ;;
+ package) sstask="package"
+ ;;
+ deploy) sstask="deploy"
+ ;;
+ *)
+ ;;
+ esac
+
+ echo "Removing ${sstate_dir}/${sstate_pkgspec}*_${sstask}.tgz* for $target"
+ rm -rf ${sstate_dir}/${sstate_pkgspec}*_${sstask}.tgz*
+ done
+}
+
+# Function to re-execute specified task of recipe
+function testit {
+ target=$1
+ task=$2
+
+ task=`echo $task | sed 's/_setscene//'`
+
+ if [ -f $LOGS/force.$target.$task ]; then
+ return
+ fi
+
+ case $task in
+ clean|build|cleansstate|cleanall|package|cleansstate2|package_write|package_write_ipk|package_write_rpm|package_write_deb|fetch|populate_lic) return;;
+ fetchall|devshell|buildall|listtasks|checkuri|checkuriall) return;;
+ esac
+
+ echo "Attempting target $target, task $task"
+ echo "Initial build"
+ bitbake $target -c cleansstate > $LOGS/initial.$target.$task
+ bitbake $target >> $LOGS/initial.$target.$task
+ clearsstate $target >> $LOGS/initial.$target.$task
+ echo "Re-execution build"
+ bitbake $target -c $task -f > $LOGS/force.$target.$task
+ if [ "$?" != 0 ]; then
+ echo "FAILURE for $target $task"
+ cp $LOGS/force.$target.$task $LOGS/failed.$target.$task
+ bitbake $target -c clean > $LOGS/clean.$target.$task
+ else
+ bitbake $target >> $LOGS/force.$target.$task
+ if [ "$?" != 0 ]; then
+ echo "FAILURE2 for $target $task"
+ cp $LOGS/force.$target.$task $LOGS/failed.$target.$task
+ bitbake $target -c clean > $LOGS/clean.$target.$task
+ fi
+ fi
+ echo "Done"
+}
+
+# Go through the recipe list and these recipes' task list
+# Then re-execute them
+for target in $targets; do
+ # Remove log messages from bitbake output
+ case $target in
+ Summary*|WARNING*|Loading*|Loaded*|Package*|=====*) continue;;
+ esac
+ tasks=`bitbake $target -c listtasks | grep ^do_ | sed s/do_//`
+ for task in $tasks; do
+ testit $target $task
+ done
+done
+
+
diff --git a/poky/scripts/test-remote-image b/poky/scripts/test-remote-image
new file mode 100755
index 000000000..27b1cae38
--- /dev/null
+++ b/poky/scripts/test-remote-image
@@ -0,0 +1,357 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2014 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# DESCRIPTION
+# This script is used to test public autobuilder images on remote hardware.
+# The script is called from a machine that is able to download the images from the remote images repository and to connect to the test hardware.
+#
+# test-remote-image --image-types core-image-sato --repo-link http://192.168.10.2/images --required-packages rpm psplash
+#
+# Translation: Build the 'rpm' and 'psplash' packages and test a remote core-image-sato image using the http://192.168.10.2/images repository.
+#
+# You can also use the '-h' option to see some help information.
+
+import os
+import sys
+import argparse
+import logging
+import shutil
+from abc import ABCMeta, abstractmethod
+
+# Add path to scripts/lib in sys.path;
+scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+import argparse_oe
+
+# Add meta/lib to sys.path
+scriptpath.add_oe_lib_path()
+
+import oeqa.utils.ftools as ftools
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+
+# Add all lib paths relative to BBPATH to sys.path; this is used to find and import the target controllers.
+for path in get_bb_var('BBPATH').split(":"):
+ sys.path.insert(0, os.path.abspath(os.path.join(path, 'lib')))
+
+# In order to import modules that contain target controllers, we need the bitbake libraries in sys.path.
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
+ sys.exit(1)
+
+# create a logger
+def logger_create():
+ log = logging.getLogger('hwauto')
+ log.setLevel(logging.DEBUG)
+
+ fh = logging.FileHandler(filename='hwauto.log', mode='w')
+ fh.setLevel(logging.DEBUG)
+
+ ch = logging.StreamHandler(sys.stdout)
+ ch.setLevel(logging.INFO)
+
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ fh.setFormatter(formatter)
+ ch.setFormatter(formatter)
+
+ log.addHandler(fh)
+ log.addHandler(ch)
+
+ return log
+
+# instantiate the logger
+log = logger_create()
+
+
+# Define and return the arguments parser for the script
+def get_args_parser():
+    description = "This script is used to run automated runtime tests using remotely published image files. You should prepare the build environment just as you would for building local images and running the tests."
+ parser = argparse_oe.ArgumentParser(description=description)
+    parser.add_argument('--image-types', required=True, action="store", nargs='*', dest="image_types", default=None, help='The image types to test (e.g. core-image-minimal).')
+ parser.add_argument('--repo-link', required=True, action="store", type=str, dest="repo_link", default=None, help='The link to the remote images repository.')
+ parser.add_argument('--required-packages', required=False, action="store", nargs='*', dest="required_packages", default=None, help='Required packages for the tests. They will be built before the testing begins.')
+ parser.add_argument('--targetprofile', required=False, action="store", nargs=1, dest="targetprofile", default='AutoTargetProfile', help='The target profile to be used.')
+ parser.add_argument('--repoprofile', required=False, action="store", nargs=1, dest="repoprofile", default='PublicAB', help='The repo profile to be used.')
+ parser.add_argument('--skip-download', required=False, action="store_true", dest="skip_download", default=False, help='Skip downloading the images completely. This needs the correct files to be present in the directory specified by the target profile.')
+ return parser
+
+class BaseTargetProfile(object, metaclass=ABCMeta):
+ """
+ This class defines the meta profile for a specific target (MACHINE type + image type).
+ """
+
+ def __init__(self, image_type):
+ self.image_type = image_type
+
+ self.kernel_file = None
+ self.rootfs_file = None
+ self.manifest_file = None
+ self.extra_download_files = [] # Extra files (full name) to be downloaded. They should be situated in repo_link
+
+ # This method is used as the standard interface with the target profile classes.
+ # It returns a dictionary containing a list of files and their meaning/description.
+ def get_files_dict(self):
+ files_dict = {}
+
+ if self.kernel_file:
+ files_dict['kernel_file'] = self.kernel_file
+ else:
+ log.error('The target profile did not set a kernel file.')
+ sys.exit(1)
+
+ if self.rootfs_file:
+ files_dict['rootfs_file'] = self.rootfs_file
+ else:
+ log.error('The target profile did not set a rootfs file.')
+ sys.exit(1)
+
+ if self.manifest_file:
+ files_dict['manifest_file'] = self.manifest_file
+ else:
+ log.error('The target profile did not set a manifest file.')
+ sys.exit(1)
+
+ for idx, f in enumerate(self.extra_download_files):
+ files_dict['extra_download_file' + str(idx)] = f
+
+ return files_dict
+
+class AutoTargetProfile(BaseTargetProfile):
+
+ def __init__(self, image_type):
+ super(AutoTargetProfile, self).__init__(image_type)
+ self.image_name = get_bb_var('IMAGE_LINK_NAME', target=image_type)
+ self.kernel_type = get_bb_var('KERNEL_IMAGETYPE', target=image_type)
+ self.controller = self.get_controller()
+
+ self.set_kernel_file()
+ self.set_rootfs_file()
+ self.set_manifest_file()
+ self.set_extra_download_files()
+
+ # Get the controller object that will be used by bitbake.
+ def get_controller(self):
+ from oeqa.controllers.testtargetloader import TestTargetLoader
+
+ target_controller = get_bb_var('TEST_TARGET')
+ bbpath = get_bb_var('BBPATH').split(':')
+
+ if target_controller == "qemu":
+ from oeqa.targetcontrol import QemuTarget
+ controller = QemuTarget
+ else:
+ testtargetloader = TestTargetLoader()
+ controller = testtargetloader.get_controller_module(target_controller, bbpath)
+ return controller
+
+ def set_kernel_file(self):
+ postconfig = "QA_GET_MACHINE = \"${MACHINE}\""
+ machine = get_bb_var('QA_GET_MACHINE', postconfig=postconfig)
+ self.kernel_file = self.kernel_type + '-' + machine + '.bin'
+
+ def set_rootfs_file(self):
+ image_fstypes = get_bb_var('IMAGE_FSTYPES').split(' ')
+        # Get a matching value between the target's IMAGE_FSTYPES and the image fstypes supported by the target controller.
+ fstype = self.controller.match_image_fstype(d=None, image_fstypes=image_fstypes)
+ if fstype:
+ self.rootfs_file = self.image_name + '.' + fstype
+ else:
+            log.error("Could not get a compatible image fstype. Check that IMAGE_FSTYPES and the target controller's supported_image_fstypes fields have common values.")
+ sys.exit(1)
+
+ def set_manifest_file(self):
+ self.manifest_file = self.image_name + ".manifest"
+
+ def set_extra_download_files(self):
+ self.extra_download_files = self.get_controller_extra_files()
+ if not self.extra_download_files:
+ self.extra_download_files = []
+
+ def get_controller_extra_files(self):
+ controller = self.get_controller()
+ return controller.get_extra_files()
+
+
+class BaseRepoProfile(object, metaclass=ABCMeta):
+ """
+ This class defines the meta profile for an images repository.
+ """
+
+ def __init__(self, repolink, localdir):
+ self.localdir = localdir
+ self.repolink = repolink
+
+ # The following abstract methods are the interfaces to the repository profile classes derived from this abstract class.
+
+ # This method should check the file named 'file_name' if it is different than the upstream one.
+ # Should return False if the image is the same as the upstream and True if it differs.
+ @abstractmethod
+ def check_old_file(self, file_name):
+ pass
+
+ # This method should fetch file_name and create a symlink to localname if set.
+ @abstractmethod
+ def fetch(self, file_name, localname=None):
+ pass
+
+class PublicAB(BaseRepoProfile):
+
+ def __init__(self, repolink, localdir=None):
+ super(PublicAB, self).__init__(repolink, localdir)
+ if localdir is None:
+ self.localdir = os.path.join(os.environ['BUILDDIR'], 'PublicABMirror')
+
+ # Not yet implemented. Always returning True.
+ def check_old_file(self, file_name):
+ return True
+
+ def get_repo_path(self):
+ path = '/machines/'
+
+ postconfig = "QA_GET_MACHINE = \"${MACHINE}\""
+ machine = get_bb_var('QA_GET_MACHINE', postconfig=postconfig)
+ if 'qemu' in machine:
+ path += 'qemu/'
+
+ postconfig = "QA_GET_DISTRO = \"${DISTRO}\""
+ distro = get_bb_var('QA_GET_DISTRO', postconfig=postconfig)
+ path += distro.replace('poky', machine) + '/'
+ return path
+
+
+ def fetch(self, file_name, localname=None):
+ repo_path = self.get_repo_path()
+ link = self.repolink + repo_path + file_name
+
+ self.wget(link, self.localdir, localname)
+
+ def wget(self, link, localdir, localname=None, extraargs=None):
+ wget_cmd = '/usr/bin/env wget -t 2 -T 30 -nv --passive-ftp --no-check-certificate '
+
+ if localname:
+ wget_cmd += ' -O ' + localname + ' '
+
+ if extraargs:
+ wget_cmd += ' ' + extraargs + ' '
+
+ wget_cmd += " -P %s '%s'" % (localdir, link)
+ runCmd(wget_cmd)
+
+class HwAuto():
+
+ def __init__(self, image_types, repolink, required_packages, targetprofile, repoprofile, skip_download):
+ log.info('Initializing..')
+ self.image_types = image_types
+ self.repolink = repolink
+ self.required_packages = required_packages
+ self.targetprofile = targetprofile
+ self.repoprofile = repoprofile
+ self.skip_download = skip_download
+ self.repo = self.get_repo_profile(self.repolink)
+
+ # Get the repository profile; for now we only look inside this module.
+ def get_repo_profile(self, *args, **kwargs):
+ repo = getattr(sys.modules[__name__], self.repoprofile)(*args, **kwargs)
+ log.info("Using repo profile: %s" % repo.__class__.__name__)
+ return repo
+
+ # Get the target profile; for now we only look inside this module.
+ def get_target_profile(self, *args, **kwargs):
+ target = getattr(sys.modules[__name__], self.targetprofile)(*args, **kwargs)
+ log.info("Using target profile: %s" % target.__class__.__name__)
+ return target
+
+ # Run the testimage task on a build while redirecting DEPLOY_DIR_IMAGE to repo.localdir, where the images are downloaded.
+ def runTestimageBuild(self, image_type):
+ log.info("Running the runtime tests for %s.." % image_type)
+ postconfig = "DEPLOY_DIR_IMAGE = \"%s\"" % self.repo.localdir
+ result = bitbake("%s -c testimage" % image_type, ignore_status=True, postconfig=postconfig)
+ testimage_results = ftools.read_file(os.path.join(get_bb_var("T", image_type), "log.do_testimage"))
+ log.info('Runtime tests results for %s:' % image_type)
+ print(testimage_results)
+ return result
+
+ # Start the procedure!
+ def run(self):
+ if self.required_packages:
+ # Build the required packages for the tests
+ log.info("Building the required packages: %s ." % ', '.join(map(str, self.required_packages)))
+ result = bitbake(self.required_packages, ignore_status=True)
+ if result.status != 0:
+ log.error("Could not build required packages: %s. Output: %s" % (self.required_packages, result.output))
+ sys.exit(1)
+
+ # Build the package repository meta data.
+ log.info("Building the package index.")
+ result = bitbake("package-index", ignore_status=True)
+ if result.status != 0:
+ log.error("Could not build 'package-index'. Output: %s" % result.output)
+ sys.exit(1)
+
+ # Create the directory structure for the images to be downloaded
+ log.info("Creating directory structure %s" % self.repo.localdir)
+ if not os.path.exists(self.repo.localdir):
+ os.makedirs(self.repo.localdir)
+
+ # For each image type, download the needed files and run the tests.
+ noissuesfound = True
+ for image_type in self.image_types:
+ if self.skip_download:
+ log.info("Skipping downloading the images..")
+ else:
+ target = self.get_target_profile(image_type)
+ files_dict = target.get_files_dict()
+ log.info("Downloading files for %s" % image_type)
+ for f in files_dict:
+ if self.repo.check_old_file(files_dict[f]):
+ filepath = os.path.join(self.repo.localdir, files_dict[f])
+ if os.path.exists(filepath):
+ os.remove(filepath)
+ self.repo.fetch(files_dict[f])
+
+ result = self.runTestimageBuild(image_type)
+ if result.status != 0:
+ noissuesfound = False
+
+ if noissuesfound:
+ log.info('Finished. No issues found.')
+ else:
+ log.error('Finished. Some runtime tests have failed. Returning non-0 status code.')
+ sys.exit(1)
+
+
+
+def main():
+
+ parser = get_args_parser()
+ args = parser.parse_args()
+
+ hwauto = HwAuto(image_types=args.image_types, repolink=args.repo_link, required_packages=args.required_packages, targetprofile=args.targetprofile, repoprofile=args.repoprofile, skip_download=args.skip_download)
+
+ hwauto.run()
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/tiny/dirsize.py b/poky/scripts/tiny/dirsize.py
new file mode 100755
index 000000000..ddccc5a8c
--- /dev/null
+++ b/poky/scripts/tiny/dirsize.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+#
+# Display details of the root filesystem size, broken up by directory.
+# Allows for limiting by size to focus on the larger files.
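+#
+# Example (illustrative): from the root of an extracted rootfs, show only
+# entries larger than 10 kB:
+#   cd <rootfs> && dirsize.py 10240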
+#
+# Author: Darren Hart <dvhart@linux.intel.com>
+#
+
+import os
+import sys
+import stat
+
+class Record:
+ def create(path):
+ r = Record(path)
+
+ s = os.lstat(path)
+ if stat.S_ISDIR(s.st_mode):
+ for p in os.listdir(path):
+ pathname = path + "/" + p
+ ss = os.lstat(pathname)
+ if not stat.S_ISLNK(ss.st_mode):
+ r.records.append(Record.create(pathname))
+ r.size += r.records[-1].size
+ r.records.sort(reverse=True)
+ else:
+ r.size = os.lstat(path).st_size
+
+ return r
+ create = staticmethod(create)
+
+ def __init__(self, path):
+ self.path = path
+ self.size = 0
+ self.records = []
+
+ def __lt__(this, that):
+ if that is None:
+ return False
+ if not isinstance(that, Record):
+ raise TypeError
+ if len(this.records) > 0 and len(that.records) == 0:
+ return False
+ if this.size > that.size:
+ return False
+ return True
+
+ def show(self, minsize):
+ total = 0
+ if self.size <= minsize:
+ return 0
+ print("%10d %s" % (self.size, self.path))
+ for r in self.records:
+ total += r.show(minsize)
+ if len(self.records) == 0:
+ total = self.size
+ return total
+
+
+def main():
+ minsize = 0
+ if len(sys.argv) == 2:
+ minsize = int(sys.argv[1])
+ rootfs = Record.create(".")
+ total = rootfs.show(minsize)
+ print("Displayed %d/%d bytes (%.2f%%)" % \
+ (total, rootfs.size, 100 * float(total) / rootfs.size))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/tiny/ksize.py b/poky/scripts/tiny/ksize.py
new file mode 100755
index 000000000..ea1ca7ff2
--- /dev/null
+++ b/poky/scripts/tiny/ksize.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+#
+# Display details of the kernel build size, broken up by built-in.o. Sort
+# the objects by size. Run from the top level kernel build directory.
+#
+# Author: Darren Hart <dvhart@linux.intel.com>
+#
+
+import sys
+import getopt
+import os
+from subprocess import *
+
+def usage():
+ prog = os.path.basename(sys.argv[0])
+ print('Usage: %s [OPTION]...' % prog)
+ print(' -d, display an additional level of drivers detail')
+ print(' -h, --help display this help and exit')
+ print('')
+ print('Run %s from the top-level Linux kernel build directory.' % prog)
+
+
+class Sizes:
+ def __init__(self, glob):
+ self.title = glob
+ p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE)
+ output = p.communicate()[0].splitlines()
+ if len(output) > 2:
+ sizes = output[-1].split()[0:4]
+ self.text = int(sizes[0])
+ self.data = int(sizes[1])
+ self.bss = int(sizes[2])
+ self.total = int(sizes[3])
+ else:
+ self.text = self.data = self.bss = self.total = 0
+
+ def show(self, indent=""):
+ print("%-32s %10d | %10d %10d %10d" % \
+ (indent+self.title, self.total, self.text, self.data, self.bss))
+
+
+class Report:
+ def create(filename, title, subglob=None):
+ r = Report(filename, title)
+ path = os.path.dirname(filename)
+
+ p = Popen("ls " + str(path) + "/*.o | grep -v built-in.o",
+ shell=True, stdout=PIPE, stderr=PIPE)
+ glob = ' '.join(p.communicate()[0].splitlines())
+ oreport = Report(glob, str(path) + "/*.o")
+ oreport.sizes.title = str(path) + "/*.o"
+ r.parts.append(oreport)
+
+ if subglob:
+ p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE)
+ for f in p.communicate()[0].splitlines():
+ path = os.path.dirname(f)
+ r.parts.append(Report.create(f, path, str(path) + "/*/built-in.o"))
+ r.parts.sort(reverse=True)
+
+ for b in r.parts:
+ r.totals["total"] += b.sizes.total
+ r.totals["text"] += b.sizes.text
+ r.totals["data"] += b.sizes.data
+ r.totals["bss"] += b.sizes.bss
+
+ r.deltas["total"] = r.sizes.total - r.totals["total"]
+ r.deltas["text"] = r.sizes.text - r.totals["text"]
+ r.deltas["data"] = r.sizes.data - r.totals["data"]
+ r.deltas["bss"] = r.sizes.bss - r.totals["bss"]
+ return r
+ create = staticmethod(create)
+
+ def __init__(self, glob, title):
+ self.glob = glob
+ self.title = title
+ self.sizes = Sizes(glob)
+ self.parts = []
+ self.totals = {"total":0, "text":0, "data":0, "bss":0}
+ self.deltas = {"total":0, "text":0, "data":0, "bss":0}
+
+ def show(self, indent=""):
+ rule = str.ljust(indent, 80, '-')
+ print("%-32s %10s | %10s %10s %10s" % \
+ (indent+self.title, "total", "text", "data", "bss"))
+ print(rule)
+ self.sizes.show(indent)
+ print(rule)
+ for p in self.parts:
+ if p.sizes.total > 0:
+ p.sizes.show(indent)
+ print(rule)
+ print("%-32s %10d | %10d %10d %10d" % \
+ (indent+"sum", self.totals["total"], self.totals["text"],
+ self.totals["data"], self.totals["bss"]))
+ print("%-32s %10d | %10d %10d %10d" % \
+ (indent+"delta", self.deltas["total"], self.deltas["text"],
+ self.deltas["data"], self.deltas["bss"]))
+ print("\n")
+
+ def __lt__(this, that):
+ if that is None:
+ return 1
+ if not isinstance(that, Report):
+ raise TypeError
+ return this.sizes.total < that.sizes.total
+
+ def __cmp__(this, that):
+ if that is None:
+ return 1
+ if not isinstance(that, Report):
+ raise TypeError
+ if this.sizes.total < that.sizes.total:
+ return -1
+ if this.sizes.total > that.sizes.total:
+ return 1
+ return 0
+
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "dh", ["help"])
+ except getopt.GetoptError as err:
+ print('%s' % str(err))
+ usage()
+ sys.exit(2)
+
+ driver_detail = False
+ for o, a in opts:
+ if o == '-d':
+ driver_detail = True
+ elif o in ('-h', '--help'):
+ usage()
+ sys.exit(0)
+ else:
+ assert False, "unhandled option"
+
+ glob = "arch/*/built-in.o */built-in.o"
+ vmlinux = Report.create("vmlinux", "Linux Kernel", glob)
+
+ vmlinux.show()
+ for b in vmlinux.parts:
+ if b.totals["total"] > 0 and len(b.parts) > 1:
+ b.show()
+ if b.title == "drivers" and driver_detail:
+ for d in b.parts:
+ if d.totals["total"] > 0 and len(d.parts) > 1:
+ d.show(" ")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/poky/scripts/tiny/ksum.py b/poky/scripts/tiny/ksum.py
new file mode 100755
index 000000000..d4f389215
--- /dev/null
+++ b/poky/scripts/tiny/ksum.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2016, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION 'ksum.py' generates a combined summary of vmlinux and
+# module sizes for a built kernel, as a quick tool for comparing the
+# overall effects of systemic tinification changes. Execute from the
+# base directory of the kernel build you want to summarize. Setting
+# the 'verbose' flag will display the sizes for each file included in
+# the summary.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
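+# Example (run from the top of a finished kernel build tree; the path is
+# illustrative):
+#   $ cd /path/to/kernel-build
+#   $ ksum.py -v
+#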
+
+__version__ = "0.1.0"
+
+# Python Standard Library modules
+import os
+import sys
+import getopt
+from subprocess import *
+
+def usage():
+ prog = os.path.basename(sys.argv[0])
+ print('Usage: %s [OPTION]...' % prog)
+ print(' -v, display sizes for each file')
+ print(' -h, --help display this help and exit')
+ print('')
+ print('Run %s from the top-level Linux kernel build directory.' % prog)
+
+verbose = False
+
+n_ko_files = 0
+ko_file_list = []
+
+ko_text = 0
+ko_data = 0
+ko_bss = 0
+ko_total = 0
+
+vmlinux_file = ""
+vmlinux_level = 0
+
+vmlinux_text = 0
+vmlinux_data = 0
+vmlinux_bss = 0
+vmlinux_total = 0
+
+def is_vmlinux_file(filename):
+ global vmlinux_level
+ if filename == ("vmlinux") and vmlinux_level == 0:
+ vmlinux_level += 1
+ return True
+ return False
+
+def is_ko_file(filename):
+ if filename.endswith(".ko"):
+ return True
+ return False
+
+def collect_object_files():
+ print "Collecting object files recursively from %s..." % os.getcwd()
+ for dirpath, dirs, files in os.walk(os.getcwd()):
+ for filename in files:
+ if is_ko_file(filename):
+ ko_file_list.append(os.path.join(dirpath, filename))
+ elif is_vmlinux_file(filename):
+ global vmlinux_file
+ vmlinux_file = os.path.join(dirpath, filename)
+ print "Collecting object files [DONE]"
+
+def add_ko_file(filename):
+ p = Popen("size -t " + filename, shell=True, stdout=PIPE, stderr=PIPE)
+ output = p.communicate()[0].splitlines()
+ if len(output) > 2:
+ sizes = output[-1].split()[0:4]
+ if verbose:
+ print " %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
+ print "%s" % filename[len(os.getcwd()) + 1:]
+ global n_ko_files, ko_text, ko_data, ko_bss, ko_total
+ ko_text += int(sizes[0])
+ ko_data += int(sizes[1])
+ ko_bss += int(sizes[2])
+ ko_total += int(sizes[3])
+ n_ko_files += 1
+
+def get_vmlinux_totals():
+ p = Popen("size -t " + vmlinux_file, shell=True, stdout=PIPE, stderr=PIPE)
+ output = p.communicate()[0].splitlines()
+ if len(output) > 2:
+ sizes = output[-1].split()[0:4]
+ if verbose:
+ print " %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
+ print "%s" % vmlinux_file[len(os.getcwd()) + 1:]
+ global vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total
+ vmlinux_text += int(sizes[0])
+ vmlinux_data += int(sizes[1])
+ vmlinux_bss += int(sizes[2])
+ vmlinux_total += int(sizes[3])
+
+def sum_ko_files():
+ for ko_file in ko_file_list:
+ add_ko_file(ko_file)
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "vh", ["help"])
+ except getopt.GetoptError as err:
+ print('%s' % str(err))
+ usage()
+ sys.exit(2)
+
+ for o, a in opts:
+ if o == '-v':
+ global verbose
+ verbose = True
+ elif o in ('-h', '--help'):
+ usage()
+ sys.exit(0)
+ else:
+ assert False, "unhandled option"
+
+ collect_object_files()
+ sum_ko_files()
+ get_vmlinux_totals()
+
+ print "\nTotals:"
+ print "\nvmlinux:"
+ print " text\tdata\t\tbss\t\ttotal"
+ print " %-10d\t%-10d\t%-10d\t%-10d" % \
+ (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total)
+ print "\nmodules (%d):" % n_ko_files
+ print " text\tdata\t\tbss\t\ttotal"
+ print " %-10d\t%-10d\t%-10d\t%-10d" % \
+ (ko_text, ko_data, ko_bss, ko_total)
+ print "\nvmlinux + modules:"
+ print " text\tdata\t\tbss\t\ttotal"
+ print " %-10d\t%-10d\t%-10d\t%-10d" % \
+ (vmlinux_text + ko_text, vmlinux_data + ko_data, \
+ vmlinux_bss + ko_bss, vmlinux_total + ko_total)
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc(5)
+ sys.exit(ret)
diff --git a/poky/scripts/verify-bashisms b/poky/scripts/verify-bashisms
new file mode 100755
index 000000000..a979bd296
--- /dev/null
+++ b/poky/scripts/verify-bashisms
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+
+import sys, os, subprocess, re, shutil
+
+whitelist = (
+ # type is supported by dash
+ 'if type systemctl >/dev/null 2>/dev/null; then',
+ 'if type systemd-tmpfiles >/dev/null 2>/dev/null; then',
+ 'type update-rc.d >/dev/null 2>/dev/null; then',
+ 'command -v',
+ # HOSTNAME is set locally
+ 'buildhistory_single_commit "$CMDLINE" "$HOSTNAME"',
+ # False-positive, match is a grep not shell expression
+ 'grep "^$groupname:[^:]*:[^:]*:\\([^,]*,\\)*$username\\(,[^,]*\\)*"',
+ # TODO verify dash's '. script args' behaviour
+ '. $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE'
+ )
+
+def is_whitelisted(s):
+ for w in whitelist:
+ if w in s:
+ return True
+ return False
+
+SCRIPT_LINENO_RE = re.compile(r' line (\d+) ')
+BASHISM_WARNING = re.compile(r'^(possible bashism in.*)$', re.MULTILINE)
+
+def process(filename, function, lineno, script):
+ import tempfile
+
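+    # checkbashisms.pl scans files rather than strings, so write the shell
+    # fragment to a temporary file, prepending a shebang if it lacks one.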
+ if not script.startswith("#!"):
+ script = "#! /bin/sh\n" + script
+
+ fn = tempfile.NamedTemporaryFile(mode="w+t")
+ fn.write(script)
+ fn.flush()
+
+ try:
+ subprocess.check_output(("checkbashisms.pl", fn.name), universal_newlines=True, stderr=subprocess.STDOUT)
+ # No bashisms, so just return
+ return
+ except subprocess.CalledProcessError as e:
+ # TODO check exit code is 1
+
+ # Replace the temporary filename with the function and split it
+ output = e.output.replace(fn.name, function)
+ if not output or not output.startswith('possible bashism'):
+ # Probably starts with or contains only warnings. Dump verbatim
+            # with one space of indentation. Can't do the splitting and whitelist
+ # checking below.
+ return '\n'.join([filename,
+ ' Unexpected output from checkbashisms.pl'] +
+ [' ' + x for x in output.splitlines()])
+
+ # We know that the first line matches and that therefore the first
+ # list entry will be empty - skip it.
+ output = BASHISM_WARNING.split(output)[1:]
+ # Turn the output into a single string like this:
+ # /.../foobar.bb
+ # possible bashism in updatercd_postrm line 2 (type):
+ # if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
+ # ...
+ # ...
+ result = []
+ # Check the results against the whitelist
+ for message, source in zip(output[0::2], output[1::2]):
+ if not is_whitelisted(source):
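+            # checkbashisms reports line numbers relative to the temporary
+            # script; shift them to the function's position in the recipe.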
+ if lineno is not None:
+ message = SCRIPT_LINENO_RE.sub(lambda m: ' line %d ' % (int(m.group(1)) + int(lineno) - 1),
+ message)
+ result.append(' ' + message.strip())
+ result.extend([' %s' % x for x in source.splitlines()])
+ if result:
+ result.insert(0, filename)
+ return '\n'.join(result)
+ else:
+ return None
+
+def get_tinfoil():
+ scripts_path = os.path.dirname(os.path.realpath(__file__))
+ lib_path = scripts_path + '/lib'
+ sys.path = sys.path + [lib_path]
+ import scriptpath
+ scriptpath.add_bitbake_lib_path()
+ import bb.tinfoil
+ tinfoil = bb.tinfoil.Tinfoil()
+ tinfoil.prepare()
+ # tinfoil.logger.setLevel(logging.WARNING)
+ return tinfoil
+
+if __name__=='__main__':
+    import argparse
+
+    parser = argparse.ArgumentParser(description='Bashism detector for shell fragments in recipes.')
+ parser.add_argument("recipes", metavar="RECIPE", nargs="*", help="recipes to check (if not specified, all will be checked)")
+ parser.add_argument("--verbose", default=False, action="store_true")
+ args = parser.parse_args()
+
+ if shutil.which("checkbashisms.pl") is None:
+ print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl")
+ sys.exit(1)
+
+ # The order of defining the worker function,
+ # initializing the pool and connecting to the
+ # bitbake server is crucial, don't change it.
+ def func(item):
+ (filename, key, lineno), script = item
+ if args.verbose:
+ print("Scanning %s:%s" % (filename, key))
+ return process(filename, key, lineno, script)
+
+ import multiprocessing
+ pool = multiprocessing.Pool()
+
+ tinfoil = get_tinfoil()
+
+ # This is only the default configuration and should iterate over
+ # recipecaches to handle multiconfig environments
+ pkg_pn = tinfoil.cooker.recipecaches[""].pkg_pn
+
+ if args.recipes:
+ initial_pns = args.recipes
+ else:
+ initial_pns = sorted(pkg_pn)
+
+ pns = set()
+ scripts = {}
+ print("Generating scripts...")
+ for pn in initial_pns:
+ for fn in pkg_pn[pn]:
+ # There's no point checking multiple BBCLASSEXTENDed variants of the same recipe
+ # (at least in general - there is some risk that the variants contain different scripts)
+ realfn, _, _ = bb.cache.virtualfn2realfn(fn)
+ if realfn not in pns:
+ pns.add(realfn)
+ data = tinfoil.parse_recipe_file(realfn)
+ for key in data.keys():
+ if data.getVarFlag(key, "func") and not data.getVarFlag(key, "python"):
+ script = data.getVar(key, False)
+ if script:
+ filename = data.getVarFlag(key, "filename")
+ lineno = data.getVarFlag(key, "lineno")
+ # There's no point in checking a function multiple
+ # times just because different recipes include it.
+ # We identify unique scripts by file, name, and (just in case)
+ # line number.
+ attributes = (filename or realfn, key, lineno)
+ scripts.setdefault(attributes, script)
+
+
+ print("Scanning scripts...\n")
+ for result in pool.imap(func, scripts.items()):
+ if result:
+ print(result)
+ tinfoil.shutdown()
diff --git a/poky/scripts/wic b/poky/scripts/wic
new file mode 100755
index 000000000..7392bc4e7
--- /dev/null
+++ b/poky/scripts/wic
@@ -0,0 +1,542 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION 'wic' is the OpenEmbedded Image Creator that users can
+# use to generate bootable images. Invoking it without any arguments
+# will display help screens for the 'wic' command and list the
+# available 'wic' subcommands. Invoking a subcommand without any
+# arguments will likewise display help screens for the specified
+# subcommand. Please use that interface for detailed help.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
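+# Typical invocations (the image and kickstart names are examples):
+#   $ wic create mkefidisk -e core-image-minimal
+#   $ wic list images
+#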
+__version__ = "0.2.0"
+
+# Python Standard Library modules
+import os
+import sys
+import argparse
+import logging
+
+from collections import namedtuple
+from distutils import spawn
+
+# External modules
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path.insert(0, lib_path)
+import scriptpath
+scriptpath.add_oe_lib_path()
+
+# Check whether wic is running within eSDK environment
+sdkroot = scripts_path
+if os.environ.get('SDKTARGETSYSROOT'):
+ while sdkroot != '' and sdkroot != os.sep:
+ if os.path.exists(os.path.join(sdkroot, '.devtoolbase')):
+ # Set BUILDDIR for wic to work within eSDK
+ os.environ['BUILDDIR'] = sdkroot
+ # .devtoolbase only exists within eSDK
+ # If found, initialize bitbake path for eSDK environment and append to PATH
+ sdkroot = os.path.join(os.path.dirname(scripts_path), 'bitbake', 'bin')
+ os.environ['PATH'] += ":" + sdkroot
+ break
+ sdkroot = os.path.dirname(sdkroot)
+
+bitbake_exe = spawn.find_executable('bitbake')
+if bitbake_exe:
+ bitbake_path = scriptpath.add_bitbake_lib_path()
+ from bb import cookerdata
+ from bb.main import bitbake_main, BitBakeConfigParameters
+else:
+ bitbake_main = None
+
+from wic import WicError
+from wic.misc import get_bitbake_var, BB_VARS
+from wic import engine
+from wic import help as hlp
+
+
+def wic_logger():
+ """Create and convfigure wic logger."""
+ logger = logging.getLogger('wic')
+ logger.setLevel(logging.INFO)
+
+ handler = logging.StreamHandler()
+
+ formatter = logging.Formatter('%(levelname)s: %(message)s')
+ handler.setFormatter(formatter)
+
+ logger.addHandler(handler)
+
+ return logger
+
+logger = wic_logger()
+
+def rootfs_dir_to_args(krootfs_dir):
+ """
+    Serialize a krootfs_dir dict into a 'key=value' argument string
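+    e.g. {'ROOTFS_DIR': '/path/to/rootfs'} -> 'ROOTFS_DIR=/path/to/rootfs'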
+ """
+ rootfs_dir = ''
+ for key, val in krootfs_dir.items():
+ rootfs_dir += ' '
+ rootfs_dir += '='.join([key, val])
+ return rootfs_dir.strip()
+
+
+class RootfsArgAction(argparse.Action):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ def __call__(self, parser, namespace, value, option_string=None):
+ if not "rootfs_dir" in vars(namespace) or \
+ not type(namespace.__dict__['rootfs_dir']) is dict:
+ namespace.__dict__['rootfs_dir'] = {}
+
+ if '=' in value:
+ (key, rootfs_dir) = value.split('=')
+ else:
+ key = 'ROOTFS_DIR'
+ rootfs_dir = value
+
+ namespace.__dict__['rootfs_dir'][key] = rootfs_dir
+
+
+def wic_create_subcommand(options, usage_str):
+ """
+ Command-line handling for image creation. The real work is done
+ by image.engine.wic_create()
+ """
+ if options.build_rootfs and not bitbake_main:
+ raise WicError("Can't build rootfs as bitbake is not in the $PATH")
+
+ if not options.image_name:
+ missed = []
+ for val, opt in [(options.rootfs_dir, 'rootfs-dir'),
+ (options.bootimg_dir, 'bootimg-dir'),
+ (options.kernel_dir, 'kernel-dir'),
+ (options.native_sysroot, 'native-sysroot')]:
+ if not val:
+ missed.append(opt)
+ if missed:
+ raise WicError("The following build artifacts are not specified: %s" %
+ ", ".join(missed))
+
+ if options.image_name:
+ BB_VARS.default_image = options.image_name
+ else:
+ options.build_check = False
+
+ if options.vars_dir:
+ BB_VARS.vars_dir = options.vars_dir
+
+ if options.build_check and not engine.verify_build_env():
+ raise WicError("Couldn't verify build environment, exiting")
+
+ if options.debug:
+ logger.setLevel(logging.DEBUG)
+
+ if options.image_name:
+ if options.build_rootfs:
+ argv = ["bitbake", options.image_name]
+ if options.debug:
+ argv.append("--debug")
+
+ logger.info("Building rootfs...\n")
+ if bitbake_main(BitBakeConfigParameters(argv),
+ cookerdata.CookerConfiguration()):
+ raise WicError("bitbake exited with error")
+
+ rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", options.image_name)
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE", options.image_name)
+ bootimg_dir = get_bitbake_var("STAGING_DATADIR", options.image_name)
+
+ native_sysroot = options.native_sysroot
+ if options.vars_dir and not native_sysroot:
+ native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", options.image_name)
+ else:
+ if options.build_rootfs:
+ raise WicError("Image name is not specified, exiting. "
+ "(Use -e/--image-name to specify it)")
+ native_sysroot = options.native_sysroot
+
+ if not options.vars_dir and (not native_sysroot or not os.path.isdir(native_sysroot)):
+ logger.info("Building wic-tools...\n")
+ if bitbake_main(BitBakeConfigParameters("bitbake wic-tools".split()),
+ cookerdata.CookerConfiguration()):
+ raise WicError("bitbake wic-tools failed")
+ native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
+ if not native_sysroot:
+ raise WicError("Unable to find the location of the native tools sysroot")
+
+ wks_file = options.wks_file
+
+ if not wks_file.endswith(".wks"):
+ wks_file = engine.find_canned_image(scripts_path, wks_file)
+ if not wks_file:
+ raise WicError("No image named %s found, exiting. (Use 'wic list images' "
+ "to list available images, or specify a fully-qualified OE "
+ "kickstart (.wks) filename)" % options.wks_file)
+
+ if not options.image_name:
+ rootfs_dir = ''
+ if 'ROOTFS_DIR' in options.rootfs_dir:
+ rootfs_dir = options.rootfs_dir['ROOTFS_DIR']
+ bootimg_dir = options.bootimg_dir
+ kernel_dir = options.kernel_dir
+ native_sysroot = options.native_sysroot
+ if rootfs_dir and not os.path.isdir(rootfs_dir):
+ raise WicError("--rootfs-dir (-r) not found, exiting")
+ if not os.path.isdir(bootimg_dir):
+ raise WicError("--bootimg-dir (-b) not found, exiting")
+ if not os.path.isdir(kernel_dir):
+ raise WicError("--kernel-dir (-k) not found, exiting")
+ if not os.path.isdir(native_sysroot):
+ raise WicError("--native-sysroot (-n) not found, exiting")
+ else:
+ not_found = not_found_dir = ""
+ if not os.path.isdir(rootfs_dir):
+ (not_found, not_found_dir) = ("rootfs-dir", rootfs_dir)
+ elif not os.path.isdir(kernel_dir):
+ (not_found, not_found_dir) = ("kernel-dir", kernel_dir)
+ elif not os.path.isdir(native_sysroot):
+ (not_found, not_found_dir) = ("native-sysroot", native_sysroot)
+ if not_found:
+ if not not_found_dir:
+ not_found_dir = "Completely missing artifact - wrong image (.wks) used?"
+ logger.info("Build artifacts not found, exiting.")
+ logger.info(" (Please check that the build artifacts for the machine")
+ logger.info(" selected in local.conf actually exist and that they")
+ logger.info(" are the correct artifacts for the image (.wks file)).\n")
+ raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir)
+
+ krootfs_dir = options.rootfs_dir
+ if krootfs_dir is None:
+ krootfs_dir = {}
+ krootfs_dir['ROOTFS_DIR'] = rootfs_dir
+
+ rootfs_dir = rootfs_dir_to_args(krootfs_dir)
+
+ logger.info("Creating image(s)...\n")
+ engine.wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+ native_sysroot, options)
+
+
+def wic_list_subcommand(args, usage_str):
+ """
+ Command-line handling for listing available images.
+ The real work is done by image.engine.wic_list()
+ """
+ if not engine.wic_list(args, scripts_path):
+ raise WicError("Bad list arguments, exiting")
+
+
+def wic_ls_subcommand(args, usage_str):
+ """
+ Command-line handling for list content of images.
+ The real work is done by engine.wic_ls()
+ """
+ engine.wic_ls(args, args.native_sysroot)
+
+def wic_cp_subcommand(args, usage_str):
+ """
+ Command-line handling for copying files/dirs to images.
+ The real work is done by engine.wic_cp()
+ """
+ engine.wic_cp(args, args.native_sysroot)
+
+def wic_rm_subcommand(args, usage_str):
+ """
+ Command-line handling for removing files/dirs from images.
+ The real work is done by engine.wic_rm()
+ """
+ engine.wic_rm(args, args.native_sysroot)
+
+def wic_write_subcommand(args, usage_str):
+ """
+ Command-line handling for writing images.
+ The real work is done by engine.wic_write()
+ """
+ engine.wic_write(args, args.native_sysroot)
+
+def wic_help_subcommand(args, usage_str):
+ """
+ Command-line handling for help subcommand to keep the current
+ structure of the function definitions.
+ """
+ pass
+
+
+def wic_help_topic_subcommand(usage_str, help_str):
+ """
+ Display function for help 'sub-subcommands'.
+ """
+ print(help_str)
+ return
+
+
+wic_help_topic_usage = """
+"""
+
+helptopics = {
+ "plugins": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_plugins_help],
+ "overview": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_overview_help],
+ "kickstart": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_kickstart_help],
+ "create": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_create_help],
+ "ls": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_ls_help],
+ "cp": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_cp_help],
+ "rm": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_rm_help],
+ "write": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_write_help],
+ "list": [wic_help_topic_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_list_help]
+}
+
+
+def wic_init_parser_create(subparser):
+ subparser.add_argument("wks_file")
+
+ subparser.add_argument("-o", "--outdir", dest="outdir", default='.',
+ help="name of directory to create image in")
+ subparser.add_argument("-e", "--image-name", dest="image_name",
+ help="name of the image to use the artifacts from "
+ "e.g. core-image-sato")
+ subparser.add_argument("-r", "--rootfs-dir", action=RootfsArgAction,
+ help="path to the /rootfs dir to use as the "
+ ".wks rootfs source")
+ subparser.add_argument("-b", "--bootimg-dir", dest="bootimg_dir",
+ help="path to the dir containing the boot artifacts "
+ "(e.g. /EFI or /syslinux dirs) to use as the "
+ ".wks bootimg source")
+ subparser.add_argument("-k", "--kernel-dir", dest="kernel_dir",
+ help="path to the dir containing the kernel to use "
+ "in the .wks bootimg")
+ subparser.add_argument("-n", "--native-sysroot", dest="native_sysroot",
+ help="path to the native sysroot containing the tools "
+ "to use to build the image")
+ subparser.add_argument("-s", "--skip-build-check", dest="build_check",
+ action="store_false", default=True, help="skip the build check")
+ subparser.add_argument("-f", "--build-rootfs", action="store_true", help="build rootfs")
+ subparser.add_argument("-c", "--compress-with", choices=("gzip", "bzip2", "xz"),
+ dest='compressor',
+ help="compress image with specified compressor")
+ subparser.add_argument("-m", "--bmap", action="store_true", help="generate .bmap")
+ subparser.add_argument("--no-fstab-update" ,action="store_true",
+ help="Do not change fstab file.")
+ subparser.add_argument("-v", "--vars", dest='vars_dir',
+ help="directory with <image>.env files that store "
+ "bitbake variables")
+ subparser.add_argument("-D", "--debug", dest="debug", action="store_true",
+ default=False, help="output debug information")
+ return
+
+
+def wic_init_parser_list(subparser):
+ subparser.add_argument("list_type",
+ help="can be 'images' or 'source-plugins' "
+ "to obtain a list. "
+ "If value is a valid .wks image file")
+ subparser.add_argument("help_for", default=[], nargs='*',
+ help="If 'list_type' is a valid .wks image file "
+ "this value can be 'help' to show the help information "
+ "defined inside the .wks file")
+ return
+
+def imgtype(arg):
+ """
+ Custom type for ArgumentParser
+ Converts path spec to named tuple: (image, partition, path)
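+    e.g. 'wic.img:1/boot' -> ImgType(image='wic.img', part='1', path='boot')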
+ """
+ image = arg
+ part = path = None
+ if ':' in image:
+ image, part = image.split(':')
+ if '/' in part:
+ part, path = part.split('/', 1)
+ if not path:
+ path = '/'
+
+ if not os.path.isfile(image):
+ err = "%s is not a regular file or symlink" % image
+ raise argparse.ArgumentTypeError(err)
+
+ return namedtuple('ImgType', 'image part path')(image, part, path)
+
+def wic_init_parser_ls(subparser):
+ subparser.add_argument("path", type=imgtype,
+ help="image spec: <image>[:<vfat partition>[<path>]]")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+
+def imgpathtype(arg):
+ img = imgtype(arg)
+ if img.part is None:
+ raise argparse.ArgumentTypeError("partition number is not specified")
+ return img
+
+def wic_init_parser_cp(subparser):
+ subparser.add_argument("src",
+ help="source spec")
+ subparser.add_argument("dest", type=imgpathtype,
+ help="image spec: <image>:<vfat partition>[<path>]")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+
+def wic_init_parser_rm(subparser):
+ subparser.add_argument("path", type=imgpathtype,
+ help="path: <image>:<vfat partition><path>")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+
+def expandtype(rules):
+ """
+ Custom type for ArgumentParser
+ Converts expand rules to the dictionary {<partition>: size}
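+    e.g. '1:200M-2:1G' -> {1: 209715200, 2: 1073741824}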
+ """
+ if rules == 'auto':
+ return {}
+ result = {}
+ for rule in rules.split('-'):
+ try:
+ part, size = rule.split(':')
+ except ValueError:
+ raise argparse.ArgumentTypeError("Incorrect rule format: %s" % rule)
+
+ if not part.isdigit():
+ raise argparse.ArgumentTypeError("Rule '%s': partition number must be integer" % rule)
+
+ # validate size
+ multiplier = 1
+ for suffix, mult in [('K', 1024), ('M', 1024 * 1024), ('G', 1024 * 1024 * 1024)]:
+ if size.upper().endswith(suffix):
+ multiplier = mult
+ size = size[:-1]
+ break
+ if not size.isdigit():
+ raise argparse.ArgumentTypeError("Rule '%s': size must be integer" % rule)
+
+ result[int(part)] = int(size) * multiplier
+
+ return result
+
+def wic_init_parser_write(subparser):
+ subparser.add_argument("image",
+ help="path to the wic image")
+ subparser.add_argument("target",
+ help="target file or device")
+ subparser.add_argument("-e", "--expand", type=expandtype,
+ help="expand rules: auto or <partition>:<size>[,<partition>:<size>]")
+ subparser.add_argument("-n", "--native-sysroot",
+ help="path to the native sysroot containing the tools")
+
+def wic_init_parser_help(subparser):
+ helpparsers = subparser.add_subparsers(dest='help_topic', help=hlp.wic_usage)
+ for helptopic in helptopics:
+ helpparsers.add_parser(helptopic, help=helptopics[helptopic][2])
+ return
+
+
+subcommands = {
+ "create": [wic_create_subcommand,
+ hlp.wic_create_usage,
+ hlp.wic_create_help,
+ wic_init_parser_create],
+ "list": [wic_list_subcommand,
+ hlp.wic_list_usage,
+ hlp.wic_list_help,
+ wic_init_parser_list],
+ "ls": [wic_ls_subcommand,
+ hlp.wic_ls_usage,
+ hlp.wic_ls_help,
+ wic_init_parser_ls],
+ "cp": [wic_cp_subcommand,
+ hlp.wic_cp_usage,
+ hlp.wic_cp_help,
+ wic_init_parser_cp],
+ "rm": [wic_rm_subcommand,
+ hlp.wic_rm_usage,
+ hlp.wic_rm_help,
+ wic_init_parser_rm],
+ "write": [wic_write_subcommand,
+ hlp.wic_write_usage,
+ hlp.wic_write_help,
+ wic_init_parser_write],
+ "help": [wic_help_subcommand,
+ wic_help_topic_usage,
+ hlp.wic_help_help,
+ wic_init_parser_help]
+}
+
+
+def init_parser(parser):
+ parser.add_argument("--version", action="version",
+ version="%(prog)s {version}".format(version=__version__))
+ subparsers = parser.add_subparsers(dest='command', help=hlp.wic_usage)
+ for subcmd in subcommands:
+ subparser = subparsers.add_parser(subcmd, help=subcommands[subcmd][2])
+ subcommands[subcmd][3](subparser)
+
+
+def main(argv):
+ parser = argparse.ArgumentParser(
+ description="wic version %s" % __version__)
+
+ init_parser(parser)
+
+ args = parser.parse_args(argv)
+
+ if "command" in vars(args):
+ if args.command == "help":
+ if args.help_topic is None:
+ parser.print_help()
+ print()
+ print("Please specify a help topic")
+ elif args.help_topic in helptopics:
+ hlpt = helptopics[args.help_topic]
+ hlpt[0](hlpt[1], hlpt[2])
+ return 0
+
+ return hlp.invoke_subcommand(args, parser, hlp.wic_help_usage, subcommands)
+
+
+if __name__ == "__main__":
+ try:
+ sys.exit(main(sys.argv[1:]))
+ except WicError as err:
+ print()
+ logger.error(err)
+ sys.exit(1)
diff --git a/poky/scripts/yocto-check-layer b/poky/scripts/yocto-check-layer
new file mode 100755
index 000000000..5a4fd752c
--- /dev/null
+++ b/poky/scripts/yocto-check-layer
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+
+# Yocto Project layer checking tool
+#
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
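+# Example (the layer path is illustrative):
+#   $ source oe-init-build-env
+#   $ yocto-check-layer /path/to/meta-mylayer
+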
+import os
+import sys
+import argparse
+import logging
+import time
+import signal
+import shutil
+import collections
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+scriptpath.add_oe_lib_path()
+scriptpath.add_bitbake_lib_path()
+
+from checklayer import LayerType, detect_layers, add_layer, add_layer_dependencies, get_signatures
+from oeqa.utils.commands import get_bb_vars
+
+PROGNAME = 'yocto-check-layer'
+CASES_PATHS = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'lib', 'checklayer', 'cases')]
+logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
+
+def test_layer(td, layer, test_software_layer_signatures):
+ from checklayer.context import CheckLayerTestContext
+ logger.info("Starting to analyze: %s" % layer['name'])
+ logger.info("----------------------------------------------------------------------")
+
+ tc = CheckLayerTestContext(td=td, logger=logger, layer=layer, test_software_layer_signatures=test_software_layer_signatures)
+ tc.loadTests(CASES_PATHS)
+ return tc.runTests()
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Yocto Project layer checking tool",
+ add_help=False)
+ parser.add_argument('layers', metavar='LAYER_DIR', nargs='+',
+ help='Layer to check')
+ parser.add_argument('-o', '--output-log',
+ help='File to output log (optional)', action='store')
+ parser.add_argument('--dependency', nargs="+",
+ help='Layers to process for dependencies', action='store')
+ parser.add_argument('--machines', nargs="+",
+ help='List of MACHINEs to be used during testing', action='store')
+ parser.add_argument('--additional-layers', nargs="+",
+ help='List of additional layers to add during testing', action='store')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('--with-software-layer-signature-check', action='store_true', dest='test_software_layer_signatures',
+ default=True,
+ help='check that software layers do not change signatures (on by default)')
+ group.add_argument('--without-software-layer-signature-check', action='store_false', dest='test_software_layer_signatures',
+ help='disable signature checking for software layers')
+ parser.add_argument('-n', '--no-auto', help='Disable auto layer discovery',
+ action='store_true')
+ parser.add_argument('-d', '--debug', help='Enable debug output',
+ action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors',
+ action='store_true')
+
+ parser.add_argument('-h', '--help', action='help',
+ default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ args = parser.parse_args()
+
+ if args.output_log:
+ fh = logging.FileHandler(args.output_log)
+ fh.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ logger.addHandler(fh)
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ if not 'BUILDDIR' in os.environ:
+ logger.error("You must source the environment before run this script.")
+ logger.error("$ source oe-init-build-env")
+ return 1
+ builddir = os.environ['BUILDDIR']
+ bblayersconf = os.path.join(builddir, 'conf', 'bblayers.conf')
+
+ layers = detect_layers(args.layers, args.no_auto)
+ if not layers:
+ logger.error("Fail to detect layers")
+ return 1
+ if args.additional_layers:
+ additional_layers = detect_layers(args.additional_layers, args.no_auto)
+ else:
+ additional_layers = []
+ if args.dependency:
+ dep_layers = detect_layers(args.dependency, args.no_auto)
+ dep_layers = dep_layers + layers
+ else:
+ dep_layers = layers
+
+ logger.info("Detected layers:")
+    # Iterate over a copy so broken layers can be removed from the real
+    # list while looping.
+    for layer in layers[:]:
+        if layer['type'] == LayerType.ERROR_BSP_DISTRO:
+            logger.error("%s: Can't be DISTRO and BSP type at the same time."\
+                    " The conf/distro and conf/machine folders were found."\
+                    % layer['name'])
+            layers.remove(layer)
+        elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
+            logger.error("%s: Missing conf/layer.conf file."\
+                    % layer['name'])
+            layers.remove(layer)
+ else:
+ logger.info("%s: %s, %s" % (layer['name'], layer['type'],
+ layer['path']))
+ if not layers:
+ return 1
+
+ shutil.copyfile(bblayersconf, bblayersconf + '.backup')
+ def cleanup_bblayers(signum, frame):
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+ os.unlink(bblayersconf + '.backup')
+ signal.signal(signal.SIGTERM, cleanup_bblayers)
+ signal.signal(signal.SIGINT, cleanup_bblayers)
+
+ td = {}
+ results = collections.OrderedDict()
+ results_status = collections.OrderedDict()
+
+ layers_tested = 0
+ for layer in layers:
+ if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \
+ layer['type'] == LayerType.ERROR_BSP_DISTRO:
+ continue
+
+ logger.info('')
+ logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
+ layer['path']))
+
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+
+        # Add the layer's dependencies (and those of any additional layers)
+        # to bblayers.conf; skip the layer if any dependency is missing.
+        missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger)
+        if not missing_dependencies:
+            for additional_layer in additional_layers:
+                if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger):
+                    missing_dependencies = True
+                    break
+        if missing_dependencies:
+            logger.info('Skipping %s due to missing dependencies.' % layer['name'])
+            results[layer['name']] = None
+            results_status[layer['name']] = 'SKIPPED (Missing dependencies)'
+            layers_tested = layers_tested + 1
+            continue
+
+ if any(map(lambda additional_layer: not add_layer(bblayersconf, additional_layer, dep_layers, logger),
+ additional_layers)):
+ logger.info('Skipping %s due to missing additional layers.' % layer['name'])
+ results[layer['name']] = None
+ results_status[layer['name']] = 'SKIPPED (Missing additional layers)'
+ layers_tested = layers_tested + 1
+ continue
+
+ logger.info('Getting initial bitbake variables ...')
+ td['bbvars'] = get_bb_vars()
+ logger.info('Getting initial signatures ...')
+ td['builddir'] = builddir
+ td['sigs'], td['tunetasks'] = get_signatures(td['builddir'])
+ td['machines'] = args.machines
+
+ if not add_layer(bblayersconf, layer, dep_layers, logger):
+            logger.info('Skipping %s as the layer could not be added to bblayers.conf.' % layer['name'])
+ results[layer['name']] = None
+ results_status[layer['name']] = 'SKIPPED (Unknown)'
+ layers_tested = layers_tested + 1
+ continue
+
+ result = test_layer(td, layer, args.test_software_layer_signatures)
+ results[layer['name']] = result
+ results_status[layer['name']] = 'PASS' if results[layer['name']].wasSuccessful() else 'FAIL'
+ layers_tested = layers_tested + 1
+
+ ret = 0
+ if layers_tested:
+ logger.info('')
+ logger.info('Summary of results:')
+ logger.info('')
+ for layer_name in results_status:
+ logger.info('%s ... %s' % (layer_name, results_status[layer_name]))
+ if not results[layer_name] or not results[layer_name].wasSuccessful():
+ ret = 2 # ret = 1 used for initialization errors
+
+ cleanup_bblayers(None, None)
+
+ return ret
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/scripts/yocto-check-layer-wrapper b/poky/scripts/yocto-check-layer-wrapper
new file mode 100755
index 000000000..bbf6ee176
--- /dev/null
+++ b/poky/scripts/yocto-check-layer-wrapper
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# Yocto Project layer check tool wrapper
+#
+# Creates a temporary build directory to run the yocto-check-layer
+# script to avoid a contaminated environment.
+#
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
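+# Usage example (the layer path is illustrative):
+#   $ . oe-init-build-env
+#   $ yocto-check-layer-wrapper -o check.log /path/to/meta-mylayer
+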
+if [ -z "$BUILDDIR" ]; then
+ echo "Please source oe-init-build-env before run this script."
+ exit 2
+fi
+
+# since we are using a temp directory, use the realpath for output
+# log option
+output_log=''
+while getopts o: name
+do
+ case $name in
+ o) output_log=$(realpath "$OPTARG")
+ esac
+done
+shift $(($OPTIND - 1))
+
+# generate a temp directory to run check layer script
+base_dir=$(realpath $BUILDDIR/../)
+cd $base_dir
+
+build_dir=$(mktemp -p $base_dir -d -t build-XXXX)
+
+source oe-init-build-env $build_dir
+if [[ $output_log != '' ]]; then
+    yocto-check-layer -o "$output_log" "$@"
+else
+ yocto-check-layer "$@"
+fi
+retcode=$?
+
+rm -rf $build_dir
+
+exit $retcode