author     Dave Cobbley <david.j.cobbley@linux.intel.com>    2018-08-14 10:05:37 -0700
committer  Brad Bishop <bradleyb@fuzziesquirrel.com>         2018-08-22 21:26:31 -0400
commit     eb8dc40360f0cfef56fb6947cc817a547d6d9bc6 (patch)
tree       de291a73dc37168da6370e2cf16c347d1eba9df8 /poky/meta/lib
parent     9c3cf826d853102535ead04cebc2d6023eff3032 (diff)
[Subtree] Removing import-layers directory
As part of the move to subtrees, need to bring all the import layers
content to the top level.

Change-Id: I4a163d10898cbc6e11c27f776f60e1a470049d8f
Signed-off-by: Dave Cobbley <david.j.cobbley@linux.intel.com>
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Diffstat (limited to 'poky/meta/lib')
-rw-r--r--  poky/meta/lib/bblayers/create.py  66
-rw-r--r--  poky/meta/lib/bblayers/templates/README  41
-rw-r--r--  poky/meta/lib/bblayers/templates/example.bb  11
-rw-r--r--  poky/meta/lib/bblayers/templates/layer.conf  10
-rw-r--r--  poky/meta/lib/buildstats.py  158
-rw-r--r--  poky/meta/lib/oe/__init__.py  2
-rw-r--r--  poky/meta/lib/oe/buildhistory_analysis.py  665
-rw-r--r--  poky/meta/lib/oe/cachedpath.py  233
-rw-r--r--  poky/meta/lib/oe/classextend.py  122
-rw-r--r--  poky/meta/lib/oe/classutils.py  44
-rw-r--r--  poky/meta/lib/oe/copy_buildsystem.py  262
-rw-r--r--  poky/meta/lib/oe/data.py  47
-rw-r--r--  poky/meta/lib/oe/distro_check.py  308
-rw-r--r--  poky/meta/lib/oe/gpg_sign.py  128
-rw-r--r--  poky/meta/lib/oe/license.py  243
-rw-r--r--  poky/meta/lib/oe/lsb.py  117
-rw-r--r--  poky/meta/lib/oe/maketype.py  102
-rw-r--r--  poky/meta/lib/oe/manifest.py  344
-rw-r--r--  poky/meta/lib/oe/package.py  294
-rw-r--r--  poky/meta/lib/oe/package_manager.py  1787
-rw-r--r--  poky/meta/lib/oe/packagedata.py  95
-rw-r--r--  poky/meta/lib/oe/packagegroup.py  36
-rw-r--r--  poky/meta/lib/oe/patch.py  895
-rw-r--r--  poky/meta/lib/oe/path.py  261
-rw-r--r--  poky/meta/lib/oe/prservice.py  126
-rw-r--r--  poky/meta/lib/oe/qa.py  171
-rw-r--r--  poky/meta/lib/oe/recipeutils.py  971
-rw-r--r--  poky/meta/lib/oe/rootfs.py  973
-rw-r--r--  poky/meta/lib/oe/sdk.py  473
-rw-r--r--  poky/meta/lib/oe/sstatesig.py  404
-rw-r--r--  poky/meta/lib/oe/terminal.py  308
-rw-r--r--  poky/meta/lib/oe/types.py  153
-rw-r--r--  poky/meta/lib/oe/useradd.py  68
-rw-r--r--  poky/meta/lib/oe/utils.py  421
-rw-r--r--  poky/meta/lib/oeqa/buildperf/__init__.py  19
-rw-r--r--  poky/meta/lib/oeqa/buildperf/base.py  511
-rw-r--r--  poky/meta/lib/oeqa/buildperf/test_basic.py  127
-rw-r--r--  poky/meta/lib/oeqa/controllers/__init__.py  3
-rw-r--r--  poky/meta/lib/oeqa/controllers/masterimage.py  239
-rw-r--r--  poky/meta/lib/oeqa/controllers/testtargetloader.py  68
-rw-r--r--  poky/meta/lib/oeqa/core/README  76
-rw-r--r--  poky/meta/lib/oeqa/core/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/core/case.py  46
-rw-r--r--  poky/meta/lib/oeqa/core/cases/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/core/cases/example/data.json  1
-rw-r--r--  poky/meta/lib/oeqa/core/cases/example/test_basic.py  20
-rw-r--r--  poky/meta/lib/oeqa/core/context.py  191
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/__init__.py  71
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/data.py  98
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/depends.py  100
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/oeid.py  23
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/oetag.py  24
-rw-r--r--  poky/meta/lib/oeqa/core/decorator/oetimeout.py  45
-rw-r--r--  poky/meta/lib/oeqa/core/exception.py  23
-rw-r--r--  poky/meta/lib/oeqa/core/loader.py  355
-rw-r--r--  poky/meta/lib/oeqa/core/runner.py  277
-rw-r--r--  poky/meta/lib/oeqa/core/target/__init__.py  33
-rw-r--r--  poky/meta/lib/oeqa/core/target/qemu.py  45
-rw-r--r--  poky/meta/lib/oeqa/core/target/ssh.py  267
-rw-r--r--  poky/meta/lib/oeqa/core/tests/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/data.py  20
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/depends.py  38
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/invalid/oeid.py  15
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py  12
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py  8
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py  10
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py  12
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/loader/valid/another.py  9
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/oeid.py  18
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/oetag.py  18
-rw-r--r--  poky/meta/lib/oeqa/core/tests/cases/timeout.py  18
-rw-r--r--  poky/meta/lib/oeqa/core/tests/common.py  45
-rwxr-xr-x  poky/meta/lib/oeqa/core/tests/test_data.py  51
-rwxr-xr-x  poky/meta/lib/oeqa/core/tests/test_decorators.py  147
-rwxr-xr-x  poky/meta/lib/oeqa/core/tests/test_loader.py  114
-rwxr-xr-x  poky/meta/lib/oeqa/core/tests/test_runner.py  38
-rw-r--r--  poky/meta/lib/oeqa/core/threaded.py  275
-rw-r--r--  poky/meta/lib/oeqa/core/utils/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/core/utils/misc.py  44
-rw-r--r--  poky/meta/lib/oeqa/core/utils/path.py  19
-rw-r--r--  poky/meta/lib/oeqa/core/utils/test.py  86
-rw-r--r--  poky/meta/lib/oeqa/files/test.c  26
-rw-r--r--  poky/meta/lib/oeqa/files/test.cpp  3
-rw-r--r--  poky/meta/lib/oeqa/files/test.pl  2
-rw-r--r--  poky/meta/lib/oeqa/files/test.py  6
-rw-r--r--  poky/meta/lib/oeqa/oetest.py  616
-rwxr-xr-x  poky/meta/lib/oeqa/runexported.py  153
-rw-r--r--  poky/meta/lib/oeqa/runtime/case.py  17
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/_qemutiny.py  8
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/apt.py  47
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/buildcpio.py  29
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/buildgalculator.py  28
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/buildlzip.py  34
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/connman.py  30
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/date.py  38
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/df.py  13
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/dnf.py  123
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/gcc.py  73
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/gi.py  15
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/kernelmodule.py  40
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/ldd.py  25
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/logrotate.py  42
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/multilib.py  41
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/oe_syslog.py  66
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/opkg.py  47
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/pam.py  33
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/parselogs.py  363
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/perl.py  37
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/ping.py  24
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/ptest.py  93
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/python.py  43
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/rpm.py  142
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/scanelf.py  26
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/scp.py  33
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/skeletoninit.py  33
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/ssh.py  15
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/stap.py  33
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/systemd.py  181
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/x32lib.py  19
-rw-r--r--  poky/meta/lib/oeqa/runtime/cases/xorg.py  17
-rw-r--r--  poky/meta/lib/oeqa/runtime/context.py  226
-rw-r--r--  poky/meta/lib/oeqa/runtime/decorator/package.py  53
-rw-r--r--  poky/meta/lib/oeqa/runtime/files/hello.stp  1
-rw-r--r--  poky/meta/lib/oeqa/runtime/files/hellomod.c  19
-rw-r--r--  poky/meta/lib/oeqa/runtime/files/hellomod_makefile  8
-rw-r--r--  poky/meta/lib/oeqa/runtime/files/testmakefile  5
-rw-r--r--  poky/meta/lib/oeqa/runtime/loader.py  16
-rw-r--r--  poky/meta/lib/oeqa/runtime/utils/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/runtime/utils/targetbuildproject.py  39
-rw-r--r--  poky/meta/lib/oeqa/sdk/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/sdk/case.py  12
-rw-r--r--  poky/meta/lib/oeqa/sdk/cases/buildcpio.py  33
-rw-r--r--  poky/meta/lib/oeqa/sdk/cases/buildgalculator.py  35
-rw-r--r--  poky/meta/lib/oeqa/sdk/cases/buildlzip.py  36
-rw-r--r--  poky/meta/lib/oeqa/sdk/cases/gcc.py  43
-rw-r--r--  poky/meta/lib/oeqa/sdk/cases/perl.py  28
-rw-r--r--  poky/meta/lib/oeqa/sdk/cases/python.py  32
-rw-r--r--  poky/meta/lib/oeqa/sdk/context.py  134
-rw-r--r--  poky/meta/lib/oeqa/sdk/files/testsdkmakefile  5
-rw-r--r--  poky/meta/lib/oeqa/sdk/utils/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py  45
-rw-r--r--  poky/meta/lib/oeqa/sdkext/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/sdkext/case.py  21
-rw-r--r--  poky/meta/lib/oeqa/sdkext/cases/devtool.py  129
-rw-r--r--  poky/meta/lib/oeqa/sdkext/context.py  29
-rw-r--r--  poky/meta/lib/oeqa/sdkext/files/myapp/Makefile  10
-rw-r--r--  poky/meta/lib/oeqa/sdkext/files/myapp/myapp.c  9
-rw-r--r--  poky/meta/lib/oeqa/sdkext/files/myapp_cmake/CMakeLists.txt  11
-rw-r--r--  poky/meta/lib/oeqa/sdkext/files/myapp_cmake/myapp.c  9
-rw-r--r--  poky/meta/lib/oeqa/selftest/case.py  278
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py  92
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/archiver.py  131
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/bblayers.py  97
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/bbtests.py  278
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/buildhistory.py  46
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/buildoptions.py  180
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/containerimage.py  85
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/devtool.py  1716
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/distrodata.py  99
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/eSDK.py  111
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/efibootpartition.py  45
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/gotoolchain.py  67
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/image_typedep.py  53
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/imagefeatures.py  240
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/layerappend.py  95
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/liboe.py  102
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/lic_checksum.py  35
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/manifest.py  166
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/meta_ide.py  49
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oelib/__init__.py  0
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py  99
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oelib/elf.py  21
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oelib/license.py  99
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oelib/path.py  89
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oelib/types.py  50
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oelib/utils.py  51
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/oescripts.py  15
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/package.py  86
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/pkgdata.py  224
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/prservice.py  131
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/recipetool.py  698
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/runcmd.py  134
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/runqemu.py  206
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/runtime_test.py  260
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/selftest.py  51
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/signing.py  187
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/sstate.py  63
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/sstatetests.py  532
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/tinfoil.py  231
-rw-r--r--  poky/meta/lib/oeqa/selftest/cases/wic.py  1066
-rw-r--r--  poky/meta/lib/oeqa/selftest/context.py  279
-rw-r--r--  poky/meta/lib/oeqa/targetcontrol.py  232
-rw-r--r--  poky/meta/lib/oeqa/utils/__init__.py  103
-rw-r--r--  poky/meta/lib/oeqa/utils/buildproject.py  55
-rw-r--r--  poky/meta/lib/oeqa/utils/commands.py  357
-rw-r--r--  poky/meta/lib/oeqa/utils/decorators.py  295
-rw-r--r--  poky/meta/lib/oeqa/utils/dump.py  91
-rw-r--r--  poky/meta/lib/oeqa/utils/ftools.py  46
-rw-r--r--  poky/meta/lib/oeqa/utils/git.py  80
-rw-r--r--  poky/meta/lib/oeqa/utils/httpserver.py  36
-rw-r--r--  poky/meta/lib/oeqa/utils/logparser.py  126
-rw-r--r--  poky/meta/lib/oeqa/utils/metadata.py  108
-rw-r--r--  poky/meta/lib/oeqa/utils/network.py  8
-rw-r--r--  poky/meta/lib/oeqa/utils/package_manager.py  211
-rw-r--r--  poky/meta/lib/oeqa/utils/qemurunner.py  591
-rw-r--r--  poky/meta/lib/oeqa/utils/qemutinyrunner.py  176
-rw-r--r--  poky/meta/lib/oeqa/utils/sshcontrol.py  242
-rw-r--r--  poky/meta/lib/oeqa/utils/subprocesstweak.py  19
-rw-r--r--  poky/meta/lib/oeqa/utils/targetbuild.py  139
-rw-r--r--  poky/meta/lib/oeqa/utils/testexport.py  263
-rw-r--r--  poky/meta/lib/rootfspostcommands.py  56
211 files changed, 29371 insertions, 0 deletions
diff --git a/poky/meta/lib/bblayers/create.py b/poky/meta/lib/bblayers/create.py
new file mode 100644
index 000000000..6a41fe050
--- /dev/null
+++ b/poky/meta/lib/bblayers/create.py
@@ -0,0 +1,66 @@
+import logging
+import os
+import sys
+import shutil
+
+import bb.utils
+
+from bblayers.common import LayerPlugin
+
+logger = logging.getLogger('bitbake-layers')
+
+def plugin_init(plugins):
+ return CreatePlugin()
+
+def read_template(template, template_dir='templates'):
+    with open(os.path.join(os.path.dirname(__file__), template_dir, template)) as fd:
+        return fd.read()
+
+class CreatePlugin(LayerPlugin):
+ def do_create_layer(self, args):
+ """Create a basic layer"""
+ layerdir = os.path.abspath(args.layerdir)
+ if os.path.exists(layerdir):
+ sys.stderr.write("Specified layer directory exists\n")
+ return 1
+
+ # create dirs
+ conf = os.path.join(layerdir, 'conf')
+ bb.utils.mkdirhier(conf)
+
+ # Create the README from templates/README
+ readme_template = read_template('README') % (args.layerdir, args.layerdir, args.layerdir, args.layerdir, args.layerdir, args.layerdir)
+ readme = os.path.join(layerdir, 'README')
+ with open(readme, 'w') as fd:
+ fd.write(readme_template)
+
+ # Copy the MIT license from meta
+ copying = 'COPYING.MIT'
+ dn = os.path.dirname
+ license_src = os.path.join(dn(dn(dn(__file__))), copying)
+ license_dst = os.path.join(layerdir, copying)
+ shutil.copy(license_src, license_dst)
+
+ # Create the layer.conf from templates/layer.conf
+ layerconf_template = read_template('layer.conf') % (args.layerdir, args.layerdir, args.layerdir, args.priority)
+ layerconf = os.path.join(conf, 'layer.conf')
+ with open(layerconf, 'w') as fd:
+ fd.write(layerconf_template)
+
+ # Create the example from templates/example.bb
+ example_template = read_template('example.bb')
+ example = os.path.join(layerdir, 'recipes-' + args.examplerecipe, args.examplerecipe)
+ bb.utils.mkdirhier(example)
+ with open(os.path.join(example, args.examplerecipe + '.bb'), 'w') as fd:
+ fd.write(example_template)
+
+ logger.plain('Add your new layer with \'bitbake-layers add-layer %s\'' % args.layerdir)
+
+ def register_commands(self, sp):
+ parser_create_layer = self.add_command(sp, 'create-layer', self.do_create_layer, parserecipes=False)
+ parser_create_layer.add_argument('layerdir', help='Layer directory to create')
+        parser_create_layer.add_argument('--priority', '-p', default=6, help='Priority of recipes in the layer')
+ parser_create_layer.add_argument('--example-recipe-name', '-e', dest='examplerecipe', default='example', help='Filename of the example recipe')
+
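A quick way to exercise do_create_layer() above is to drive the plugin directly with an argparse-style namespace, the same shape bitbake-layers hands it after parsing. A minimal sketch, not part of the commit; it assumes a BitBake environment on sys.path and that LayerPlugin's constructor takes no arguments ('meta-foo' is a hypothetical layer name):

    import argparse
    from bblayers.create import CreatePlugin

    plugin = CreatePlugin()
    # Same three fields that register_commands() wires up above.
    args = argparse.Namespace(layerdir='meta-foo', priority=6, examplerecipe='example')
    plugin.do_create_layer(args)
    # Expected result: meta-foo/conf/layer.conf, meta-foo/COPYING.MIT,
    # meta-foo/README and meta-foo/recipes-example/example/example.bb
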
diff --git a/poky/meta/lib/bblayers/templates/README b/poky/meta/lib/bblayers/templates/README
new file mode 100644
index 000000000..5a77f8d34
--- /dev/null
+++ b/poky/meta/lib/bblayers/templates/README
@@ -0,0 +1,41 @@
+This README file contains information on the contents of the %s layer.
+
+Please see the corresponding sections below for details.
+
+Dependencies
+============
+
+ URI: <first dependency>
+ branch: <branch name>
+
+ URI: <second dependency>
+ branch: <branch name>
+
+ .
+ .
+ .
+
+Patches
+=======
+
+Please submit any patches against the %s layer to the xxxx mailing list (xxxx@zzzz.org)
+and cc: the maintainer:
+
+Maintainer: XXX YYYYYY <xxx.yyyyyy@zzzzz.com>
+
+Table of Contents
+=================
+
+ I. Adding the %s layer to your build
+ II. Misc
+
+
+I. Adding the %s layer to your build
+=================================================
+
+Run 'bitbake-layers add-layer %s'
+
+II. Misc
+========
+
+--- replace with specific information about the %s layer ---
diff --git a/poky/meta/lib/bblayers/templates/example.bb b/poky/meta/lib/bblayers/templates/example.bb
new file mode 100644
index 000000000..c4b873d59
--- /dev/null
+++ b/poky/meta/lib/bblayers/templates/example.bb
@@ -0,0 +1,11 @@
+SUMMARY = "bitbake-layers recipe"
+DESCRIPTION = "Recipe created by bitbake-layers"
+LICENSE = "MIT"
+
+python do_build() {
+ bb.plain("***********************************************");
+ bb.plain("* *");
+ bb.plain("* Example recipe created by bitbake-layers *");
+ bb.plain("* *");
+ bb.plain("***********************************************");
+}
diff --git a/poky/meta/lib/bblayers/templates/layer.conf b/poky/meta/lib/bblayers/templates/layer.conf
new file mode 100644
index 000000000..3c0300226
--- /dev/null
+++ b/poky/meta/lib/bblayers/templates/layer.conf
@@ -0,0 +1,10 @@
+# We have a conf and classes directory, add to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have recipes-* directories, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+ ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "%s"
+BBFILE_PATTERN_%s = "^${LAYERDIR}/"
+BBFILE_PRIORITY_%s = "%s"
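The %s placeholders in the templates above are positional: do_create_layer() fills BBFILE_COLLECTIONS, BBFILE_PATTERN_* and BBFILE_PRIORITY_* from the layer directory name and the --priority option, in that order. A hedged sketch of that substitution (the layer name and priority values are hypothetical):

    from bblayers.create import read_template

    layerdir = 'meta-foo'  # hypothetical --layerdir argument
    priority = 6           # the --priority default
    print(read_template('layer.conf') % (layerdir, layerdir, layerdir, priority))
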
diff --git a/poky/meta/lib/buildstats.py b/poky/meta/lib/buildstats.py
new file mode 100644
index 000000000..c5d4c73cf
--- /dev/null
+++ b/poky/meta/lib/buildstats.py
@@ -0,0 +1,158 @@
+# Implements system state sampling. Called by buildstats.bbclass.
+# Because it is a real Python module, it can hold persistent state,
+# like open log files and the time of the last sampling.
+
+import os
+import time
+import re
+import bb.event
+import bb.utils
+
+class SystemStats:
+ def __init__(self, d):
+ bn = d.getVar('BUILDNAME')
+ bsdir = os.path.join(d.getVar('BUILDSTATS_BASE'), bn)
+ bb.utils.mkdirhier(bsdir)
+
+ self.proc_files = []
+ for filename, handler in (
+ ('diskstats', self._reduce_diskstats),
+ ('meminfo', self._reduce_meminfo),
+ ('stat', self._reduce_stat),
+ ):
+ # The corresponding /proc files might not exist on the host.
+ # For example, /proc/diskstats is not available in virtualized
+ # environments like Linux-VServer. Silently skip collecting
+ # the data.
+ if os.path.exists(os.path.join('/proc', filename)):
+ # In practice, this class gets instantiated only once in
+ # the bitbake cooker process. Therefore 'append' mode is
+ # not strictly necessary, but using it makes the class
+ # more robust should two processes ever write
+ # concurrently.
+ destfile = os.path.join(bsdir, '%sproc_%s.log' % ('reduced_' if handler else '', filename))
+ self.proc_files.append((filename, open(destfile, 'ab'), handler))
+ self.monitor_disk = open(os.path.join(bsdir, 'monitor_disk.log'), 'ab')
+        # Last time we sampled /proc data and recorded disk monitoring
+        # data, respectively.
+        self.last_proc = 0
+        self.last_disk_monitor = 0
+        # Minimum number of seconds between recording a sample. This
+        # becomes relevant when we get called very often while many
+        # short tasks get started. Sampling during quiet periods
+        # depends on the heartbeat event, which fires less often.
+        self.min_seconds = 1
+
+        self.meminfo_regex = re.compile(rb'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
+        self.diskstats_regex = re.compile(rb'^([hsv]d.|mtdblock\d|mmcblk\d|cciss/c\d+d\d+.*)$')
+ self.diskstats_ltime = None
+ self.diskstats_data = None
+ self.stat_ltimes = None
+
+ def close(self):
+ self.monitor_disk.close()
+ for _, output, _ in self.proc_files:
+ output.close()
+
+ def _reduce_meminfo(self, time, data):
+ """
+ Extracts 'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree'
+ and writes their values into a single line, in that order.
+ """
+ values = {}
+ for line in data.split(b'\n'):
+ m = self.meminfo_regex.match(line)
+ if m:
+ values[m.group(1)] = m.group(2)
+ if len(values) == 6:
+ return (time,
+ b' '.join([values[x] for x in
+ (b'MemTotal', b'MemFree', b'Buffers', b'Cached', b'SwapTotal', b'SwapFree')]) + b'\n')
+
+ def _diskstats_is_relevant_line(self, linetokens):
+ if len(linetokens) != 14:
+ return False
+ disk = linetokens[2]
+ return self.diskstats_regex.match(disk)
+
+ def _reduce_diskstats(self, time, data):
+ relevant_tokens = filter(self._diskstats_is_relevant_line, map(lambda x: x.split(), data.split(b'\n')))
+ diskdata = [0] * 3
+ reduced = None
+ for tokens in relevant_tokens:
+ # rsect
+ diskdata[0] += int(tokens[5])
+ # wsect
+ diskdata[1] += int(tokens[9])
+ # use
+ diskdata[2] += int(tokens[12])
+ if self.diskstats_ltime:
+ # We need to compute information about the time interval
+ # since the last sampling and record the result as sample
+ # for that point in the past.
+ interval = time - self.diskstats_ltime
+ if interval > 0:
+ sums = [ a - b for a, b in zip(diskdata, self.diskstats_data) ]
+ readTput = sums[0] / 2.0 * 100.0 / interval
+ writeTput = sums[1] / 2.0 * 100.0 / interval
+ util = float( sums[2] ) / 10 / interval
+ util = max(0.0, min(1.0, util))
+ reduced = (self.diskstats_ltime, (readTput, writeTput, util))
+
+ self.diskstats_ltime = time
+ self.diskstats_data = diskdata
+ return reduced
+
+
+ def _reduce_nop(self, time, data):
+ return (time, data)
+
+ def _reduce_stat(self, time, data):
+ if not data:
+ return None
+ # CPU times {user, nice, system, idle, io_wait, irq, softirq} from first line
+ tokens = data.split(b'\n', 1)[0].split()
+ times = [ int(token) for token in tokens[1:] ]
+ reduced = None
+ if self.stat_ltimes:
+ user = float((times[0] + times[1]) - (self.stat_ltimes[0] + self.stat_ltimes[1]))
+ system = float((times[2] + times[5] + times[6]) - (self.stat_ltimes[2] + self.stat_ltimes[5] + self.stat_ltimes[6]))
+ idle = float(times[3] - self.stat_ltimes[3])
+ iowait = float(times[4] - self.stat_ltimes[4])
+
+ aSum = max(user + system + idle + iowait, 1)
+ reduced = (time, (user/aSum, system/aSum, iowait/aSum))
+
+ self.stat_ltimes = times
+ return reduced
+
+ def sample(self, event, force):
+ now = time.time()
+ if (now - self.last_proc > self.min_seconds) or force:
+ for filename, output, handler in self.proc_files:
+ with open(os.path.join('/proc', filename), 'rb') as input:
+ data = input.read()
+ if handler:
+ reduced = handler(now, data)
+ else:
+ reduced = (now, data)
+ if reduced:
+ if isinstance(reduced[1], bytes):
+ # Use as it is.
+ data = reduced[1]
+ else:
+ # Convert to a single line.
+ data = (' '.join([str(x) for x in reduced[1]]) + '\n').encode('ascii')
+                    # Unbuffered raw write: less overhead, and useful in
+                    # case we end up with concurrent writes.
+ os.write(output.fileno(),
+ ('%.0f\n' % reduced[0]).encode('ascii') +
+ data +
+ b'\n')
+ self.last_proc = now
+
+ if isinstance(event, bb.event.MonitorDiskEvent) and \
+ ((now - self.last_disk_monitor > self.min_seconds) or force):
+ os.write(self.monitor_disk.fileno(),
+ ('%.0f\n' % now).encode('ascii') +
+ ''.join(['%s: %d\n' % (dev, sample.total_bytes - sample.free_bytes)
+ for dev, sample in event.disk_usage.items()]).encode('ascii') +
+ b'\n')
+ self.last_disk_monitor = now
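Each reduce handler above condenses a /proc snapshot into one line per sample. The following standalone sketch mirrors _reduce_meminfo() outside the class so it can run without a BitBake datastore; it only assumes a Linux host with /proc/meminfo:

    import re
    import time

    MEMINFO = re.compile(rb'^(MemTotal|MemFree|Buffers|Cached|SwapTotal|SwapFree):\s*(\d+)')
    FIELDS = (b'MemTotal', b'MemFree', b'Buffers', b'Cached', b'SwapTotal', b'SwapFree')

    def reduce_meminfo(ts, data):
        # Pick out the six monitored fields and emit them as one line.
        values = {}
        for line in data.split(b'\n'):
            m = MEMINFO.match(line)
            if m:
                values[m.group(1)] = m.group(2)
        if len(values) == len(FIELDS):
            return (ts, b' '.join(values[f] for f in FIELDS) + b'\n')

    with open('/proc/meminfo', 'rb') as f:
        print(reduce_meminfo(time.time(), f.read()))
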
diff --git a/poky/meta/lib/oe/__init__.py b/poky/meta/lib/oe/__init__.py
new file mode 100644
index 000000000..3ad9513f4
--- /dev/null
+++ b/poky/meta/lib/oe/__init__.py
@@ -0,0 +1,2 @@
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/poky/meta/lib/oe/buildhistory_analysis.py b/poky/meta/lib/oe/buildhistory_analysis.py
new file mode 100644
index 000000000..b0365abce
--- /dev/null
+++ b/poky/meta/lib/oe/buildhistory_analysis.py
@@ -0,0 +1,665 @@
+# Report significant differences in the buildhistory repository since a specific revision
+#
+# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
+# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# Note: requires GitPython 0.3.1+
+#
+# You can use this from the command line by running scripts/buildhistory-diff
+#
+
+import sys
+import os.path
+import difflib
+import git
+import re
+import hashlib
+import collections
+import bb.utils
+import bb.tinfoil
+
+
+# How to display fields
+list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+list_order_fields = ['PACKAGES']
+defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
+numeric_fields = ['PKGSIZE', 'IMAGESIZE']
+# Fields to monitor
+monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
+ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
+# Percentage change to alert for numeric fields
+monitor_numeric_threshold = 10
+# Image files to monitor (note that image-info.txt is handled separately)
+img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
+# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
+related_fields = {}
+related_fields['RDEPENDS'] = ['DEPENDS']
+related_fields['RRECOMMENDS'] = ['DEPENDS']
+related_fields['FILELIST'] = ['FILES']
+related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
+related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+
+colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+}
+
+def init_colours(use_colours):
+ global colours
+ if use_colours:
+ colours = {
+ 'colour_default': '\033[0m',
+ 'colour_add': '\033[1;32m',
+ 'colour_remove': '\033[1;31m',
+ }
+ else:
+ colours = {
+ 'colour_default': '',
+ 'colour_add': '',
+ 'colour_remove': '',
+ }
+
+class ChangeRecord:
+ def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
+ self.path = path
+ self.fieldname = fieldname
+ self.oldvalue = oldvalue
+ self.newvalue = newvalue
+ self.monitored = monitored
+ self.related = []
+ self.filechanges = None
+
+ def __str__(self):
+ return self._str_internal(True)
+
+ def _str_internal(self, outer):
+ if outer:
+ if '/image-files/' in self.path:
+ prefix = '%s: ' % self.path.split('/image-files/')[0]
+ else:
+ prefix = '%s: ' % self.path
+ else:
+ prefix = ''
+
+ def pkglist_combine(depver):
+ pkglist = []
+ for k,v in depver.items():
+ if v:
+ pkglist.append("%s (%s)" % (k,v))
+ else:
+ pkglist.append(k)
+ return pkglist
+
+ def detect_renamed_dirs(aitems, bitems):
+ adirs = set(map(os.path.dirname, aitems))
+ bdirs = set(map(os.path.dirname, bitems))
+ files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
+ for name in adirs - bdirs]
+ files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
+ for name in bdirs - adirs]
+ renamed_dirs = []
+ for dir1, files1 in files_ab:
+ rename = False
+ for dir2, files2 in files_ba:
+ if files1 == files2 and not rename:
+ renamed_dirs.append((dir1,dir2))
+ # Make sure that we don't use this (dir, files) pair again.
+ files_ba.remove((dir2,files2))
+ # If a dir has already been found to have a rename, stop and go no further.
+ rename = True
+
+ # remove files that belong to renamed dirs from aitems and bitems
+ for dir1, dir2 in renamed_dirs:
+ aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
+ bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
+ return renamed_dirs, aitems, bitems
+
+ if self.fieldname in list_fields or self.fieldname in list_order_fields:
+ renamed_dirs = []
+ changed_order = False
+ if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
+ aitems = pkglist_combine(depvera)
+ bitems = pkglist_combine(depverb)
+ else:
+ aitems = self.oldvalue.split()
+ bitems = self.newvalue.split()
+ if self.fieldname == 'FILELIST':
+ renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
+
+ removed = list(set(aitems) - set(bitems))
+ added = list(set(bitems) - set(aitems))
+
+ if not removed and not added:
+ depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
+ depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
+ for i, j in zip(depvera.items(), depverb.items()):
+ if i[0] != j[0]:
+ changed_order = True
+ break
+
+ lines = []
+ if renamed_dirs:
+ for dfrom, dto in renamed_dirs:
+ lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
+ if removed or added:
+ if removed and not bitems:
+ lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
+ else:
+ if removed:
+ lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
+ if added:
+ lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
+ else:
+ lines.append('changed order')
+
+ if not (removed or added or changed_order):
+ out = ''
+ else:
+ out = '%s: %s' % (self.fieldname, ', '.join(lines))
+
+ elif self.fieldname in numeric_fields:
+ aval = int(self.oldvalue or 0)
+ bval = int(self.newvalue or 0)
+ if aval != 0:
+ percentchg = ((bval - aval) / float(aval)) * 100
+ else:
+ percentchg = 100
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
+ elif self.fieldname in defaultval_map:
+ out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
+ if self.fieldname == 'PKG' and '[default]' in self.newvalue:
+ out += ' - may indicate debian renaming failure'
+ elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
+ if self.oldvalue and self.newvalue:
+ out = '%s changed:\n ' % self.fieldname
+ elif self.newvalue:
+ out = '%s added:\n ' % self.fieldname
+ elif self.oldvalue:
+ out = '%s cleared:\n ' % self.fieldname
+ alines = self.oldvalue.splitlines()
+ blines = self.newvalue.splitlines()
+ diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
+ out += '\n '.join(list(diff)[2:])
+ out += '\n --'
+ elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
+ if self.filechanges or (self.oldvalue and self.newvalue):
+ fieldname = self.fieldname
+ if '/image-files/' in self.path:
+ fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
+ out = 'Changes to %s:\n ' % fieldname
+ else:
+ if outer:
+ prefix = 'Changes to %s ' % self.path
+ out = '(%s):\n ' % self.fieldname
+ if self.filechanges:
+ out += '\n '.join(['%s' % i for i in self.filechanges])
+ else:
+ alines = self.oldvalue.splitlines()
+ blines = self.newvalue.splitlines()
+ diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
+ out += '\n '.join(list(diff))
+ out += '\n --'
+ else:
+ out = ''
+ else:
+ out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
+
+ if self.related:
+ for chg in self.related:
+ if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
+ continue
+ for line in chg._str_internal(False).splitlines():
+ out += '\n * %s' % line
+
+ return '%s%s' % (prefix, out) if out else ''
+
+class FileChange:
+ changetype_add = 'A'
+ changetype_remove = 'R'
+ changetype_type = 'T'
+ changetype_perms = 'P'
+ changetype_ownergroup = 'O'
+ changetype_link = 'L'
+
+ def __init__(self, path, changetype, oldvalue = None, newvalue = None):
+ self.path = path
+ self.changetype = changetype
+ self.oldvalue = oldvalue
+ self.newvalue = newvalue
+
+ def _ftype_str(self, ftype):
+ if ftype == '-':
+ return 'file'
+ elif ftype == 'd':
+ return 'directory'
+ elif ftype == 'l':
+ return 'symlink'
+ elif ftype == 'c':
+ return 'char device'
+ elif ftype == 'b':
+ return 'block device'
+ elif ftype == 'p':
+ return 'fifo'
+ elif ftype == 's':
+ return 'socket'
+ else:
+ return 'unknown (%s)' % ftype
+
+ def __str__(self):
+ if self.changetype == self.changetype_add:
+ return '%s was added' % self.path
+ elif self.changetype == self.changetype_remove:
+ return '%s was removed' % self.path
+ elif self.changetype == self.changetype_type:
+ return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
+ elif self.changetype == self.changetype_perms:
+ return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ elif self.changetype == self.changetype_ownergroup:
+ return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ elif self.changetype == self.changetype_link:
+ return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+ else:
+ return '%s changed (unknown)' % self.path
+
+
+def blob_to_dict(blob):
+    alines = blob.data_stream.read().decode('utf-8').splitlines()
+ adict = {}
+ for line in alines:
+ splitv = [i.strip() for i in line.split('=',1)]
+ if len(splitv) > 1:
+ adict[splitv[0]] = splitv[1]
+ return adict
+
+
+def file_list_to_dict(lines):
+ adict = {}
+ for line in lines:
+ # Leave the last few fields intact so we handle file names containing spaces
+ splitv = line.split(None,4)
+ # Grab the path and remove the leading .
+ path = splitv[4][1:].strip()
+ # Handle symlinks
+        if ' -> ' in path:
+ target = path.split(' -> ')[1]
+ path = path.split(' -> ')[0]
+ adict[path] = splitv[0:3] + [target]
+ else:
+ adict[path] = splitv[0:3]
+ return adict
+
+
+def compare_file_lists(alines, blines):
+ adict = file_list_to_dict(alines)
+ bdict = file_list_to_dict(blines)
+ filechanges = []
+ for path, splitv in adict.items():
+ newsplitv = bdict.pop(path, None)
+ if newsplitv:
+ # Check type
+ oldvalue = splitv[0][0]
+ newvalue = newsplitv[0][0]
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+ # Check permissions
+ oldvalue = splitv[0][1:]
+ newvalue = newsplitv[0][1:]
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
+ # Check owner/group
+ oldvalue = '%s/%s' % (splitv[1], splitv[2])
+ newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+ # Check symlink target
+ if newsplitv[0][0] == 'l':
+ if len(splitv) > 3:
+ oldvalue = splitv[3]
+ else:
+ oldvalue = None
+ newvalue = newsplitv[3]
+ if oldvalue != newvalue:
+ filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
+ else:
+ filechanges.append(FileChange(path, FileChange.changetype_remove))
+
+ # Whatever is left over has been added
+ for path in bdict:
+ filechanges.append(FileChange(path, FileChange.changetype_add))
+
+ return filechanges
+
+
+def compare_lists(alines, blines):
+ removed = list(set(alines) - set(blines))
+ added = list(set(blines) - set(alines))
+
+ filechanges = []
+ for pkg in removed:
+ filechanges.append(FileChange(pkg, FileChange.changetype_remove))
+ for pkg in added:
+ filechanges.append(FileChange(pkg, FileChange.changetype_add))
+
+ return filechanges
+
+
+def compare_pkg_lists(astr, bstr):
+ depvera = bb.utils.explode_dep_versions2(astr)
+ depverb = bb.utils.explode_dep_versions2(bstr)
+
+ # Strip out changes where the version has increased
+ remove = []
+ for k in depvera:
+ if k in depverb:
+ dva = depvera[k]
+ dvb = depverb[k]
+ if dva and dvb and len(dva) == len(dvb):
+ # Since length is the same, sort so that prefixes (e.g. >=) will line up
+ dva.sort()
+ dvb.sort()
+ removeit = True
+ for dvai, dvbi in zip(dva, dvb):
+ if dvai != dvbi:
+ aiprefix = dvai.split(' ')[0]
+ biprefix = dvbi.split(' ')[0]
+ if aiprefix == biprefix and aiprefix in ['>=', '=']:
+ if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
+ removeit = False
+ break
+ else:
+ removeit = False
+ break
+ if removeit:
+ remove.append(k)
+
+ for k in remove:
+ depvera.pop(k)
+ depverb.pop(k)
+
+ return (depvera, depverb)
+
+
+def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
+ adict = blob_to_dict(ablob)
+ bdict = blob_to_dict(bblob)
+
+ pkgname = os.path.basename(path)
+
+ defaultvals = {}
+ defaultvals['PKG'] = pkgname
+ defaultvals['PKGE'] = '0'
+
+ changes = []
+ keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
+ for key in keys:
+ astr = adict.get(key, '')
+ bstr = bdict.get(key, '')
+ if key in ver_monitor_fields:
+ monitored = report_ver or astr or bstr
+ else:
+ monitored = key in monitor_fields
+ mapped_key = defaultval_map.get(key, '')
+ if mapped_key:
+ if not astr:
+ astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
+ if not bstr:
+ bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
+
+ if astr != bstr:
+ if (not report_all) and key in numeric_fields:
+ aval = int(astr or 0)
+ bval = int(bstr or 0)
+ if aval != 0:
+ percentchg = ((bval - aval) / float(aval)) * 100
+ else:
+ percentchg = 100
+ if abs(percentchg) < monitor_numeric_threshold:
+ continue
+ elif (not report_all) and key in list_fields:
+ if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+ continue
+ if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+ (depvera, depverb) = compare_pkg_lists(astr, bstr)
+ if depvera == depverb:
+ continue
+ alist = astr.split()
+ alist.sort()
+ blist = bstr.split()
+ blist.sort()
+ # We don't care about the removal of self-dependencies
+ if pkgname in alist and not pkgname in blist:
+ alist.remove(pkgname)
+ if ' '.join(alist) == ' '.join(blist):
+ continue
+
+ if key == 'PKGR' and not report_all:
+ vers = []
+ # strip leading 'r' and dots
+ for ver in (astr.split()[0], bstr.split()[0]):
+ if ver.startswith('r'):
+ ver = ver[1:]
+ vers.append(ver.replace('.', ''))
+ maxlen = max(len(vers[0]), len(vers[1]))
+ try:
+ # pad with '0' and convert to int
+ vers = [int(ver.ljust(maxlen, '0')) for ver in vers]
+ except ValueError:
+ pass
+ else:
+ # skip decrements and increments
+ if abs(vers[0] - vers[1]) == 1:
+ continue
+
+ chg = ChangeRecord(path, key, astr, bstr, monitored)
+ changes.append(chg)
+ return changes
+
+
+def compare_siglists(a_blob, b_blob, taskdiff=False):
+ # FIXME collapse down a recipe's tasks?
+ alines = a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = b_blob.data_stream.read().decode('utf-8').splitlines()
+ keys = []
+ pnmap = {}
+ def readsigs(lines):
+ sigs = {}
+ for line in lines:
+ linesplit = line.split()
+ if len(linesplit) > 2:
+ sigs[linesplit[0]] = linesplit[2]
+ if not linesplit[0] in keys:
+ keys.append(linesplit[0])
+ pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
+ return sigs
+ adict = readsigs(alines)
+ bdict = readsigs(blines)
+ out = []
+
+ changecount = 0
+ addcount = 0
+ removecount = 0
+ if taskdiff:
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ changes = collections.OrderedDict()
+
+ def compare_hashfiles(pn, taskname, hash1, hash2):
+ hashes = [hash1, hash2]
+ hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)
+
+ if not taskname:
+ (pn, taskname) = pn.rsplit('.', 1)
+ pn = pnmap.get(pn, pn)
+ desc = '%s.%s' % (pn, taskname)
+
+ if len(hashfiles) == 0:
+ out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
+ elif not hash1 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
+ elif not hash2 in hashfiles:
+ out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
+ else:
+ out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+ for line in out2:
+ m = hashlib.sha256()
+ m.update(line.encode('utf-8'))
+ entry = changes.get(m.hexdigest(), (line, []))
+ if desc not in entry[1]:
+ changes[m.hexdigest()] = (line, entry[1] + [desc])
+
+ # Define recursion callback
+ def recursecb(key, hash1, hash2):
+ compare_hashfiles(key, None, hash1, hash2)
+ return []
+
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ changecount += 1
+ (pn, taskname) = key.rsplit('.', 1)
+ compare_hashfiles(pn, taskname, siga, sigb)
+ elif siga is None:
+ addcount += 1
+ elif sigb is None:
+ removecount += 1
+ for key, item in changes.items():
+ line, tasks = item
+ if len(tasks) == 1:
+ desc = tasks[0]
+ elif len(tasks) == 2:
+ desc = '%s and %s' % (tasks[0], tasks[1])
+ else:
+ desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
+ out.append('%s: %s' % (desc, line))
+ else:
+ for key in keys:
+ siga = adict.get(key, None)
+ sigb = bdict.get(key, None)
+ if siga is not None and sigb is not None and siga != sigb:
+ out.append('%s changed from %s to %s' % (key, siga, sigb))
+ changecount += 1
+ elif siga is None:
+ out.append('%s was added' % key)
+ addcount += 1
+ elif sigb is None:
+ out.append('%s was removed' % key)
+ removecount += 1
+ out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
+ return '\n'.join(out)
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False,
+ sigs=False, sigsdiff=False, exclude_path=None):
+ repo = git.Repo(repopath)
+ assert repo.bare == False
+ commit = repo.commit(revision1)
+ diff = commit.diff(revision2)
+
+ changes = []
+
+ if sigs or sigsdiff:
+ for d in diff.iter_change_type('M'):
+ if d.a_blob.path == 'siglist.txt':
+ changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
+ return changes
+
+ for d in diff.iter_change_type('M'):
+ path = os.path.dirname(d.a_blob.path)
+ if path.startswith('packages/'):
+ filename = os.path.basename(d.a_blob.path)
+ if filename == 'latest':
+ changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+ elif filename.startswith('latest.'):
+ chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
+ changes.append(chg)
+ elif path.startswith('images/'):
+ filename = os.path.basename(d.a_blob.path)
+ if filename in img_monitor_files:
+ if filename == 'files-in-image.txt':
+ alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
+ filechanges = compare_file_lists(alines,blines)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+ elif filename == 'installed-package-names.txt':
+ alines = d.a_blob.data_stream.read().decode('utf-8').splitlines()
+ blines = d.b_blob.data_stream.read().decode('utf-8').splitlines()
+ filechanges = compare_lists(alines,blines)
+ if filechanges:
+ chg = ChangeRecord(path, filename, None, None, True)
+ chg.filechanges = filechanges
+ changes.append(chg)
+ else:
+ chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
+ changes.append(chg)
+ elif filename == 'image-info.txt':
+ changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+ elif '/image-files/' in path:
+ chg = ChangeRecord(path, filename, d.a_blob.data_stream.read().decode('utf-8'), d.b_blob.data_stream.read().decode('utf-8'), True)
+ changes.append(chg)
+
+ # Look for added preinst/postinst/prerm/postrm
+ # (without reporting newly added recipes)
+ addedpkgs = []
+ addedchanges = []
+ for d in diff.iter_change_type('A'):
+ path = os.path.dirname(d.b_blob.path)
+ if path.startswith('packages/'):
+ filename = os.path.basename(d.b_blob.path)
+ if filename == 'latest':
+ addedpkgs.append(path)
+ elif filename.startswith('latest.'):
+ chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read().decode('utf-8'), True)
+ addedchanges.append(chg)
+ for chg in addedchanges:
+ found = False
+ for pkg in addedpkgs:
+ if chg.path.startswith(pkg):
+ found = True
+ break
+ if not found:
+ changes.append(chg)
+
+ # Look for cleared preinst/postinst/prerm/postrm
+ for d in diff.iter_change_type('D'):
+ path = os.path.dirname(d.a_blob.path)
+ if path.startswith('packages/'):
+ filename = os.path.basename(d.a_blob.path)
+ if filename != 'latest' and filename.startswith('latest.'):
+ chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
+ changes.append(chg)
+
+ # Link related changes
+ for chg in changes:
+ if chg.monitored:
+ for chg2 in changes:
+ # (Check dirname in the case of fields from recipe info files)
+ if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
+ if chg2.fieldname in related_fields.get(chg.fieldname, []):
+ chg.related.append(chg2)
+ elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
+ chg.related.append(chg2)
+
+ # filter out unwanted paths
+ if exclude_path:
+ for chg in changes:
+ if chg.filechanges:
+ fchgs = []
+ for fchg in chg.filechanges:
+ for epath in exclude_path:
+ if fchg.path.startswith(epath):
+ break
+ else:
+ fchgs.append(fchg)
+ chg.filechanges = fchgs
+
+ if report_all:
+ return changes
+ else:
+ return [chg for chg in changes if chg.monitored]
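The scripts/buildhistory-diff entry point mentioned in the header is a thin wrapper around process_changes(); the same report can be produced programmatically. A hedged sketch, assuming an existing buildhistory git checkout at the given (hypothetical) path:

    from oe.buildhistory_analysis import init_colours, process_changes

    init_colours(False)  # plain output, no ANSI escape codes
    # Compare the last two commits of the buildhistory repository.
    for change in process_changes('/path/to/buildhistory', 'HEAD~1', 'HEAD'):
        print(change)
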
diff --git a/poky/meta/lib/oe/cachedpath.py b/poky/meta/lib/oe/cachedpath.py
new file mode 100644
index 000000000..0840cc4c3
--- /dev/null
+++ b/poky/meta/lib/oe/cachedpath.py
@@ -0,0 +1,233 @@
+#
+# Based on standard python library functions, but avoids repeated stat
+# calls. It's assumed the files will not change from under us, so we can
+# cache stat results.
+#
+
+import os
+import errno
+import stat as statmod
+
+class CachedPath(object):
+ def __init__(self):
+ self.statcache = {}
+ self.lstatcache = {}
+ self.normpathcache = {}
+ return
+
+ def updatecache(self, x):
+ x = self.normpath(x)
+ if x in self.statcache:
+ del self.statcache[x]
+ if x in self.lstatcache:
+ del self.lstatcache[x]
+
+ def normpath(self, path):
+ if path in self.normpathcache:
+ return self.normpathcache[path]
+ newpath = os.path.normpath(path)
+ self.normpathcache[path] = newpath
+ return newpath
+
+ def _callstat(self, path):
+ if path in self.statcache:
+ return self.statcache[path]
+ try:
+ st = os.stat(path)
+ self.statcache[path] = st
+ return st
+ except os.error:
+ self.statcache[path] = False
+ return False
+
+    # We might as well call lstat first and then only call stat as well
+    # in the symbolic link case, since this turns out to be much more
+    # efficient in real-world usage of this cache
+ def callstat(self, path):
+ path = self.normpath(path)
+ self.calllstat(path)
+ return self.statcache[path]
+
+ def calllstat(self, path):
+ path = self.normpath(path)
+ if path in self.lstatcache:
+ return self.lstatcache[path]
+ #bb.error("LStatpath:" + path)
+ try:
+ lst = os.lstat(path)
+ self.lstatcache[path] = lst
+ if not statmod.S_ISLNK(lst.st_mode):
+ self.statcache[path] = lst
+ else:
+ self._callstat(path)
+ return lst
+ except (os.error, AttributeError):
+ self.lstatcache[path] = False
+ self.statcache[path] = False
+ return False
+
+    # This follows symbolic links, so both islink() and isdir() can be true
+    # for the same path on systems that support symlinks
+ def isfile(self, path):
+ """Test whether a path is a regular file"""
+ st = self.callstat(path)
+ if not st:
+ return False
+ return statmod.S_ISREG(st.st_mode)
+
+ # Is a path a directory?
+ # This follows symbolic links, so both islink() and isdir()
+ # can be true for the same path on systems that support symlinks
+ def isdir(self, s):
+ """Return true if the pathname refers to an existing directory."""
+ st = self.callstat(s)
+ if not st:
+ return False
+ return statmod.S_ISDIR(st.st_mode)
+
+ def islink(self, path):
+ """Test whether a path is a symbolic link"""
+ st = self.calllstat(path)
+ if not st:
+ return False
+ return statmod.S_ISLNK(st.st_mode)
+
+ # Does a path exist?
+ # This is false for dangling symbolic links on systems that support them.
+ def exists(self, path):
+ """Test whether a path exists. Returns False for broken symbolic links"""
+ if self.callstat(path):
+ return True
+ return False
+
+ def lexists(self, path):
+ """Test whether a path exists. Returns True for broken symbolic links"""
+ if self.calllstat(path):
+ return True
+ return False
+
+ def stat(self, path):
+ return self.callstat(path)
+
+ def lstat(self, path):
+ return self.calllstat(path)
+
+ def walk(self, top, topdown=True, onerror=None, followlinks=False):
+ # Matches os.walk, not os.path.walk()
+
+ # We may not have read permission for top, in which case we can't
+ # get a list of the files the directory contains. os.path.walk
+ # always suppressed the exception then, rather than blow up for a
+ # minor reason when (say) a thousand readable directories are still
+ # left to visit. That logic is copied here.
+ try:
+ names = os.listdir(top)
+ except os.error as err:
+ if onerror is not None:
+ onerror(err)
+ return
+
+ dirs, nondirs = [], []
+ for name in names:
+ if self.isdir(os.path.join(top, name)):
+ dirs.append(name)
+ else:
+ nondirs.append(name)
+
+ if topdown:
+ yield top, dirs, nondirs
+ for name in dirs:
+ new_path = os.path.join(top, name)
+ if followlinks or not self.islink(new_path):
+ for x in self.walk(new_path, topdown, onerror, followlinks):
+ yield x
+ if not topdown:
+ yield top, dirs, nondirs
+
+ ## realpath() related functions
+ def __is_path_below(self, file, root):
+ return (file + os.path.sep).startswith(root)
+
+ def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
+ """Calculates real path of symlink 'start' + 'rel_path' below
+ 'root'; no part of 'start' below 'root' must contain symlinks. """
+ have_dir = True
+
+ for d in rel_path.split(os.path.sep):
+ if not have_dir and not assume_dir:
+ raise OSError(errno.ENOENT, "no such directory %s" % start)
+
+ if d == os.path.pardir: # '..'
+ if len(start) >= len(root):
+ # do not follow '..' before root
+ start = os.path.dirname(start)
+ else:
+ # emit warning?
+ pass
+ else:
+ (start, have_dir) = self.__realpath(os.path.join(start, d),
+ root, loop_cnt, assume_dir)
+
+ assert(self.__is_path_below(start, root))
+
+ return start
+
+ def __realpath(self, file, root, loop_cnt, assume_dir):
+ while self.islink(file) and len(file) >= len(root):
+ if loop_cnt == 0:
+ raise OSError(errno.ELOOP, file)
+
+ loop_cnt -= 1
+ target = os.path.normpath(os.readlink(file))
+
+ if not os.path.isabs(target):
+ tdir = os.path.dirname(file)
+ assert(self.__is_path_below(tdir, root))
+ else:
+ tdir = root
+
+ file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+ try:
+ is_dir = self.isdir(file)
+ except:
+ is_dir = False
+
+ return (file, is_dir)
+
+ def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+ """ Returns the canonical path of 'file' with assuming a
+ toplevel 'root' directory. When 'use_physdir' is set, all
+ preceding path components of 'file' will be resolved first;
+ this flag should be set unless it is guaranteed that there is
+ no symlink in the path. When 'assume_dir' is not set, missing
+ path components will raise an ENOENT error"""
+
+ root = os.path.normpath(root)
+ file = os.path.normpath(file)
+
+ if not root.endswith(os.path.sep):
+ # letting root end with '/' makes some things easier
+ root = root + os.path.sep
+
+ if not self.__is_path_below(file, root):
+ raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+ try:
+ if use_physdir:
+ file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+ else:
+ file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
+ except OSError as e:
+            if e.errno == errno.ELOOP:
+                # make ELOOP more readable; without catching it here, a
+                # backtrace with 100s of OSError exceptions would be printed
+                raise OSError(errno.ELOOP,
+                              "too many recursions while resolving '%s'; loop in '%s'" %
+                              (file, e.strerror))
+
+            raise
+
+ return file
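Since every query funnels through callstat()/calllstat(), repeated checks on the same path are answered from the in-memory cache rather than by new stat(2) calls. A short usage sketch (paths are illustrative):

    from oe.cachedpath import CachedPath

    cp = CachedPath()
    print(cp.isdir('/etc'), cp.isfile('/etc/hostname'), cp.islink('/etc/mtab'))
    # Later queries on the same paths hit the cache; invalidate explicitly
    # if a path is known to have changed on disk.
    cp.updatecache('/etc/hostname')
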
diff --git a/poky/meta/lib/oe/classextend.py b/poky/meta/lib/oe/classextend.py
new file mode 100644
index 000000000..d2eeaf0e5
--- /dev/null
+++ b/poky/meta/lib/oe/classextend.py
@@ -0,0 +1,122 @@
+import collections
+
+class ClassExtender(object):
+ def __init__(self, extname, d):
+ self.extname = extname
+ self.d = d
+ self.pkgs_mapping = []
+
+ def extend_name(self, name):
+ if name.startswith("kernel-") or name == "virtual/kernel":
+ return name
+ if name.startswith("rtld"):
+ return name
+ if name.endswith("-crosssdk"):
+ return name
+ if name.endswith("-" + self.extname):
+ name = name.replace("-" + self.extname, "")
+ if name.startswith("virtual/"):
+ subs = name.split("/", 1)[1]
+ if not subs.startswith(self.extname):
+ return "virtual/" + self.extname + "-" + subs
+ return name
+ if not name.startswith(self.extname):
+ return self.extname + "-" + name
+ return name
+
+ def map_variable(self, varname, setvar = True):
+ var = self.d.getVar(varname)
+ if not var:
+ return ""
+ var = var.split()
+ newvar = []
+ for v in var:
+ newvar.append(self.extend_name(v))
+ newdata = " ".join(newvar)
+ if setvar:
+ self.d.setVar(varname, newdata)
+ return newdata
+
+ def map_regexp_variable(self, varname, setvar = True):
+ var = self.d.getVar(varname)
+ if not var:
+ return ""
+ var = var.split()
+ newvar = []
+ for v in var:
+ if v.startswith("^" + self.extname):
+ newvar.append(v)
+ elif v.startswith("^"):
+ newvar.append("^" + self.extname + "-" + v[1:])
+ else:
+ newvar.append(self.extend_name(v))
+ newdata = " ".join(newvar)
+ if setvar:
+ self.d.setVar(varname, newdata)
+ return newdata
+
+ def map_depends(self, dep):
+ if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
+ return dep
+ else:
+            # Do not extend deps that already have a multilib prefix
+ var = self.d.getVar("MULTILIB_VARIANTS")
+ if var:
+ var = var.split()
+ for v in var:
+ if dep.startswith(v):
+ return dep
+ return self.extend_name(dep)
+
+ def map_depends_variable(self, varname, suffix = ""):
+ # We need to preserve EXTENDPKGV so it can be expanded correctly later
+ if suffix:
+ varname = varname + "_" + suffix
+ orig = self.d.getVar("EXTENDPKGV", False)
+ self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
+ deps = self.d.getVar(varname)
+ if not deps:
+ self.d.setVar("EXTENDPKGV", orig)
+ return
+ deps = bb.utils.explode_dep_versions2(deps)
+ newdeps = collections.OrderedDict()
+ for dep in deps:
+ newdeps[self.map_depends(dep)] = deps[dep]
+
+ self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
+ self.d.setVar("EXTENDPKGV", orig)
+
+ def map_packagevars(self):
+ for pkg in (self.d.getVar("PACKAGES").split() + [""]):
+ self.map_depends_variable("RDEPENDS", pkg)
+ self.map_depends_variable("RRECOMMENDS", pkg)
+ self.map_depends_variable("RSUGGESTS", pkg)
+ self.map_depends_variable("RPROVIDES", pkg)
+ self.map_depends_variable("RREPLACES", pkg)
+ self.map_depends_variable("RCONFLICTS", pkg)
+ self.map_depends_variable("PKG", pkg)
+
+ def rename_packages(self):
+ for pkg in (self.d.getVar("PACKAGES") or "").split():
+ if pkg.startswith(self.extname):
+ self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
+ continue
+ self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
+
+ self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
+
+ def rename_package_variables(self, variables):
+ for pkg_mapping in self.pkgs_mapping:
+ for subs in variables:
+ self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+
+class NativesdkClassExtender(ClassExtender):
+ def map_depends(self, dep):
+ if dep.startswith(self.extname):
+ return dep
+ if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
+ return dep + "-crosssdk"
+ elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
+ return dep
+ else:
+ return self.extend_name(dep)
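extend_name() only consults self.extname, so the prefixing rules can be exercised without a datastore. A hedged sketch (passing None for d is safe here only because extend_name() never touches it; 'lib32' is a hypothetical multilib name):

    from oe.classextend import ClassExtender

    ext = ClassExtender('lib32', None)
    print(ext.extend_name('zlib'))              # lib32-zlib
    print(ext.extend_name('virtual/libc'))      # virtual/lib32-libc
    print(ext.extend_name('kernel-module-abc')) # unchanged: kernel-* is exempt
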
diff --git a/poky/meta/lib/oe/classutils.py b/poky/meta/lib/oe/classutils.py
new file mode 100644
index 000000000..45cd5249b
--- /dev/null
+++ b/poky/meta/lib/oe/classutils.py
@@ -0,0 +1,44 @@
+
+class ClassRegistryMeta(type):
+ """Give each ClassRegistry their own registry"""
+ def __init__(cls, name, bases, attrs):
+ cls.registry = {}
+ type.__init__(cls, name, bases, attrs)
+
+class ClassRegistry(type, metaclass=ClassRegistryMeta):
+ """Maintain a registry of classes, indexed by name.
+
+Note that this implementation requires that the names be unique, as it uses
+a dictionary to hold the classes by name.
+
+The name in the registry can be overridden via the 'name' attribute of the
+class, and the 'priority' attribute controls priority. The prioritized()
+method returns the registered classes in priority order.
+
+Subclasses of ClassRegistry may define an 'implemented' property to exert
+control over whether the class will be added to the registry (e.g. to keep
+abstract base classes out of the registry)."""
+ priority = 0
+ def __init__(cls, name, bases, attrs):
+ super(ClassRegistry, cls).__init__(name, bases, attrs)
+ try:
+ if not cls.implemented:
+ return
+ except AttributeError:
+ pass
+
+ try:
+ cls.name
+ except AttributeError:
+ cls.name = name
+ cls.registry[cls.name] = cls
+
+ @classmethod
+ def prioritized(tcls):
+ return sorted(list(tcls.registry.values()),
+ key=lambda v: (v.priority, v.name), reverse=True)
+
+ def unregister(cls):
+        for key in list(cls.registry.keys()):
+ if cls.registry[key] is cls:
+ del cls.registry[key]
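+
+# A minimal, hypothetical registry in use (oe.terminal follows this pattern):
+#
+#   class Terminal(metaclass=ClassRegistry):
+#       priority = 0
+#
+#   class XTerminal(Terminal):
+#       name = 'xterm'
+#       priority = 1
+#
+#   Terminal.prioritized()  # -> [XTerminal, Terminal], highest priority first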
diff --git a/poky/meta/lib/oe/copy_buildsystem.py b/poky/meta/lib/oe/copy_buildsystem.py
new file mode 100644
index 000000000..4b94806c7
--- /dev/null
+++ b/poky/meta/lib/oe/copy_buildsystem.py
@@ -0,0 +1,262 @@
+# This class should provide easy access to the different aspects of the
+# buildsystem such as layers, bitbake location, etc.
+import os
+import stat
+import shutil
+
+import bb
+
+def _smart_copy(src, dest):
+ import subprocess
+    # _smart_copy chooses the correct copy method depending on whether the
+    # source is a file or a directory.
+ mode = os.stat(src).st_mode
+ if stat.S_ISDIR(mode):
+ bb.utils.mkdirhier(dest)
+ cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . \
+ | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ else:
+ shutil.copyfile(src, dest)
+ shutil.copymode(src, dest)
+
+class BuildSystem(object):
+ def __init__(self, context, d):
+ self.d = d
+ self.context = context
+ self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
+ self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
+
+ def copy_bitbake_and_layers(self, destdir, workspace_name=None):
+ # Copy in all metadata layers + bitbake (as repositories)
+ layers_copied = []
+ bb.utils.mkdirhier(destdir)
+ layers = list(self.layerdirs)
+
+ corebase = os.path.abspath(self.d.getVar('COREBASE'))
+ layers.append(corebase)
+ # The bitbake build system uses the meta-skeleton layer as a layout
+        # for common recipes, e.g. the recipetool script to create kernel recipes.
+        # Add the meta-skeleton layer so it is included as part of the eSDK installation.
+ layers.append(os.path.join(corebase, 'meta-skeleton'))
+
+ # Exclude layers
+ for layer_exclude in self.layers_exclude:
+ if layer_exclude in layers:
+ layers.remove(layer_exclude)
+
+ workspace_newname = workspace_name
+ if workspace_newname:
+ layernames = [os.path.basename(layer) for layer in layers]
+ extranum = 0
+ while workspace_newname in layernames:
+ extranum += 1
+ workspace_newname = '%s-%d' % (workspace_name, extranum)
+
+ corebase_files = self.d.getVar('COREBASE_FILES').split()
+        corebase_files = [corebase + '/' + x for x in corebase_files]
+ # Make sure bitbake goes in
+ bitbake_dir = bb.__file__.rsplit('/', 3)[0]
+ corebase_files.append(bitbake_dir)
+
+ for layer in layers:
+ layerconf = os.path.join(layer, 'conf', 'layer.conf')
+ layernewname = os.path.basename(layer)
+ workspace = False
+ if os.path.exists(layerconf):
+ with open(layerconf, 'r') as f:
+ if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
+ if workspace_newname:
+ layernewname = workspace_newname
+ workspace = True
+ else:
+ bb.plain("NOTE: Excluding local workspace layer %s from %s" % (layer, self.context))
+ continue
+
+ # If the layer was already under corebase, leave it there
+ # since layers such as meta have issues when moved.
+ layerdestpath = destdir
+ if corebase == os.path.dirname(layer):
+ layerdestpath += '/' + os.path.basename(corebase)
+ else:
+ layer_relative = os.path.basename(corebase) + '/' + os.path.relpath(layer, corebase)
+ if os.path.dirname(layer_relative) != layernewname:
+ layerdestpath += '/' + os.path.dirname(layer_relative)
+
+ layerdestpath += '/' + layernewname
+
+ layer_relative = os.path.relpath(layerdestpath,
+ destdir)
+ layers_copied.append(layer_relative)
+
+ # Treat corebase as special since it typically will contain
+ # build directories or other custom items.
+ if corebase == layer:
+ bb.utils.mkdirhier(layerdestpath)
+ for f in corebase_files:
+ f_basename = os.path.basename(f)
+ destname = os.path.join(layerdestpath, f_basename)
+ _smart_copy(f, destname)
+ else:
+ if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
+ bb.note("Skipping layer %s, already handled" % layer)
+ else:
+ _smart_copy(layer, layerdestpath)
+
+ if workspace:
+                # Make some adjustments to the original workspace layer
+ # Drop sources (recipe tasks will be locked, so we don't need them)
+ srcdir = os.path.join(layerdestpath, 'sources')
+ if os.path.isdir(srcdir):
+ shutil.rmtree(srcdir)
+ # Drop all bbappends except the one for the image the SDK is being built for
+ # (because of externalsrc, the workspace bbappends will interfere with the
+ # locked signatures if present, and we don't need them anyway)
+ image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
+ appenddir = os.path.join(layerdestpath, 'appends')
+ if os.path.isdir(appenddir):
+ for fn in os.listdir(appenddir):
+ if fn == image_bbappend:
+ continue
+ else:
+ os.remove(os.path.join(appenddir, fn))
+ # Drop README
+ readme = os.path.join(layerdestpath, 'README')
+ if os.path.exists(readme):
+ os.remove(readme)
+ # Filter out comments in layer.conf and change layer name
+ layerconf = os.path.join(layerdestpath, 'conf', 'layer.conf')
+ with open(layerconf, 'r') as f:
+ origlines = f.readlines()
+ with open(layerconf, 'w') as f:
+ for line in origlines:
+ if line.startswith('#'):
+ continue
+ line = line.replace('workspacelayer', workspace_newname)
+ f.write(line)
+
+ # meta-skeleton layer is added as part of the build system
+ # but not as a layer included in the build, therefore it is
+ # not reported to the function caller.
+ for layer in layers_copied:
+ if layer.endswith('/meta-skeleton'):
+ layers_copied.remove(layer)
+ break
+
+ return layers_copied
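+
+    # A rough usage sketch (the real callers live in the eSDK tooling; the
+    # context string and destination path here are illustrative):
+    #
+    #   buildsystem = BuildSystem('extensible SDK', d)
+    #   layers = buildsystem.copy_bitbake_and_layers(destdir + '/layers',
+    #                                                'sdk-workspace')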
+
+def generate_locked_sigs(sigfile, d):
+ bb.utils.mkdirhier(os.path.dirname(sigfile))
+ depd = d.getVar('BB_TASKDEPDATA', False)
+ tasks = ['%s.%s' % (v[2], v[1]) for v in depd.values()]
+ bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
+
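+# The locked-sigs files handled below are bitbake .inc fragments; an
+# abbreviated, hypothetical example of the layout the parsers expect
+# (the 'line[18:]' slice strips the 'SIGGEN_LOCKEDSIGS_' prefix):
+#
+#   SIGGEN_LOCKEDSIGS_t-core2-64 = "\
+#       zlib:do_install:dc0acbbb... \
+#       "
+#   SIGGEN_LOCKEDSIGS_TYPES = "t-core2-64"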
+def prune_lockedsigs(excluded_tasks, excluded_targets, lockedsigs, pruned_output):
+ with open(lockedsigs, 'r') as infile:
+ bb.utils.mkdirhier(os.path.dirname(pruned_output))
+ with open(pruned_output, 'w') as f:
+ invalue = False
+ for line in infile:
+ if invalue:
+ if line.endswith('\\\n'):
+ splitval = line.strip().split(':')
+                        if splitval[1] not in excluded_tasks and splitval[0] not in excluded_targets:
+ f.write(line)
+ else:
+ f.write(line)
+ invalue = False
+ elif line.startswith('SIGGEN_LOCKEDSIGS'):
+ invalue = True
+ f.write(line)
+
+def merge_lockedsigs(copy_tasks, lockedsigs_main, lockedsigs_extra, merged_output, copy_output=None):
+ merged = {}
+ arch_order = []
+ with open(lockedsigs_main, 'r') as f:
+ invalue = None
+ for line in f:
+ if invalue:
+ if line.endswith('\\\n'):
+ merged[invalue].append(line)
+ else:
+ invalue = None
+ elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
+ invalue = line[18:].split('=', 1)[0].rstrip()
+ merged[invalue] = []
+ arch_order.append(invalue)
+
+ with open(lockedsigs_extra, 'r') as f:
+ invalue = None
+ tocopy = {}
+ for line in f:
+ if invalue:
+ if line.endswith('\\\n'):
+                if line not in merged[invalue]:
+ target, task = line.strip().split(':')[:2]
+ if not copy_tasks or task in copy_tasks:
+ tocopy[invalue].append(line)
+ merged[invalue].append(line)
+ else:
+ invalue = None
+ elif line.startswith('SIGGEN_LOCKEDSIGS_t-'):
+ invalue = line[18:].split('=', 1)[0].rstrip()
+                if invalue not in merged:
+ merged[invalue] = []
+ arch_order.append(invalue)
+ tocopy[invalue] = []
+
+ def write_sigs_file(fn, types, sigs):
+ fulltypes = []
+ bb.utils.mkdirhier(os.path.dirname(fn))
+ with open(fn, 'w') as f:
+ for typename in types:
+ lines = sigs[typename]
+ if lines:
+ f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % typename)
+ for line in lines:
+ f.write(line)
+ f.write(' "\n')
+ fulltypes.append(typename)
+ f.write('SIGGEN_LOCKEDSIGS_TYPES = "%s"\n' % ' '.join(fulltypes))
+
+ if copy_output:
+ write_sigs_file(copy_output, list(tocopy.keys()), tocopy)
+ if merged_output:
+ write_sigs_file(merged_output, arch_order, merged)
+
+def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cache, d, fixedlsbstring="", filterfile=None):
+ import shutil
+ bb.note('Generating sstate-cache...')
+
+ nativelsbstring = d.getVar('NATIVELSBSTRING')
+ bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
+ if fixedlsbstring and nativelsbstring != fixedlsbstring:
+ nativedir = output_sstate_cache + '/' + nativelsbstring
+ if os.path.isdir(nativedir):
+ destdir = os.path.join(output_sstate_cache, fixedlsbstring)
+ for root, _, files in os.walk(nativedir):
+ for fn in files:
+ src = os.path.join(root, fn)
+ dest = os.path.join(destdir, os.path.relpath(src, nativedir))
+ if os.path.exists(dest):
+ # Already exists, and it'll be the same file, so just delete it
+ os.unlink(src)
+ else:
+ bb.utils.mkdirhier(os.path.dirname(dest))
+ shutil.move(src, dest)
+
+def check_sstate_task_list(d, targets, filteroutfile, cmdprefix='', cwd=None, logfile=None):
+ import subprocess
+
+ bb.note('Generating sstate task list...')
+
+ if not cwd:
+ cwd = os.getcwd()
+ if logfile:
+ logparam = '-l %s' % logfile
+ else:
+ logparam = ''
+ cmd = "%sBB_SETSCENE_ENFORCE=1 PSEUDO_DISABLED=1 oe-check-sstate %s -s -o %s %s" % (cmdprefix, targets, filteroutfile, logparam)
+ env = dict(d.getVar('BB_ORIGENV', False))
+ env.pop('BUILDDIR', '')
+ env.pop('BBPATH', '')
+ pathitems = env['PATH'].split(':')
+ env['PATH'] = ':'.join([item for item in pathitems if not item.endswith('/bitbake/bin')])
+ bb.process.run(cmd, stderr=subprocess.STDOUT, env=env, cwd=cwd, executable='/bin/bash')
diff --git a/poky/meta/lib/oe/data.py b/poky/meta/lib/oe/data.py
new file mode 100644
index 000000000..b8901e63f
--- /dev/null
+++ b/poky/meta/lib/oe/data.py
@@ -0,0 +1,47 @@
+import json
+
+import bb
+import oe.maketype
+
+def typed_value(key, d):
+ """Construct a value for the specified metadata variable, using its flags
+ to determine the type and parameters for construction."""
+ var_type = d.getVarFlag(key, 'type')
+ flags = d.getVarFlags(key)
+ if flags is not None:
+ flags = dict((flag, d.expand(value))
+ for flag, value in list(flags.items()))
+ else:
+ flags = {}
+
+ try:
+ return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
+ except (TypeError, ValueError) as exc:
+ bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
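+
+# For instance, with a hypothetical variable flagged as a list type
+# (assuming the stock factories from oe.maketype/oe.types are registered):
+#
+#   d.setVar('MY_FEATURES', 'pam systemd')
+#   d.setVarFlag('MY_FEATURES', 'type', 'list')
+#   typed_value('MY_FEATURES', d)  # -> ['pam', 'systemd']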
+
+def export2json(d, json_file, expand=True, searchString="", replaceString=""):
+ data2export = {}
+ keys2export = []
+
+ for key in d.keys():
+ if key.startswith("_"):
+ continue
+ elif key.startswith("BB"):
+ continue
+ elif key.startswith("B_pn"):
+ continue
+ elif key.startswith("do_"):
+ continue
+ elif d.getVarFlag(key, "func"):
+ continue
+
+ keys2export.append(key)
+
+ for key in keys2export:
+ try:
+            data2export[key] = d.getVar(key, expand).replace(searchString, replaceString)
+ except bb.data_smart.ExpansionError:
+ data2export[key] = ''
+ except AttributeError:
+ pass
+
+ with open(json_file, "w") as f:
+ json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)
diff --git a/poky/meta/lib/oe/distro_check.py b/poky/meta/lib/oe/distro_check.py
new file mode 100644
index 000000000..e775c3a6e
--- /dev/null
+++ b/poky/meta/lib/oe/distro_check.py
@@ -0,0 +1,308 @@
+import os
+
+import bb
+
+def create_socket(url, d):
+ import urllib
+ from bb.utils import export_proxies
+
+ export_proxies(d)
+ return urllib.request.urlopen(url)
+
+def get_links_from_url(url, d):
+ "Return all the href links found on the web location"
+
+ from bs4 import BeautifulSoup, SoupStrainer
+
+ soup = BeautifulSoup(create_socket(url,d), "html.parser", parse_only=SoupStrainer("a"))
+ hyperlinks = []
+ for line in soup.find_all('a', href=True):
+ hyperlinks.append(line['href'].strip('/'))
+ return hyperlinks
+
+def find_latest_numeric_release(url, d):
+    "Find the latest listed numeric release on the given URL"
+    maxrelease = 0
+    maxstr = ""
+    for link in get_links_from_url(url, d):
+        try:
+            # TODO use LooseVersion
+            release = float(link)
+        except ValueError:
+            release = 0
+        if release > maxrelease:
+            maxrelease = release
+            maxstr = link
+    return maxstr
+
+def is_src_rpm(name):
+ "Check if the link is pointing to a src.rpm file"
+ return name.endswith(".src.rpm")
+
+def package_name_from_srpm(srpm):
+ "Strip out the package name from the src.rpm filename"
+
+ # ca-certificates-2016.2.7-1.0.fc24.src.rpm
+ # ^name ^ver ^release^removed
+ (name, version, release) = srpm.replace(".src.rpm", "").rsplit("-", 2)
+ return name
+
+def get_source_package_list_from_url(url, section, d):
+ "Return a sectioned list of package names from a URL list"
+
+ bb.note("Reading %s: %s" % (url, section))
+ links = get_links_from_url(url, d)
+ srpms = filter(is_src_rpm, links)
+ names_list = map(package_name_from_srpm, srpms)
+
+ new_pkgs = set()
+ for pkgs in names_list:
+ new_pkgs.add(pkgs + ":" + section)
+ return new_pkgs
+
+def get_source_package_list_from_url_by_letter(url, section, d):
+ import string
+ from urllib.error import HTTPError
+ packages = set()
+ for letter in (string.ascii_lowercase + string.digits):
+ # Not all subfolders may exist, so silently handle 404
+ try:
+ packages |= get_source_package_list_from_url(url + "/" + letter, section, d)
+ except HTTPError as e:
+ if e.code != 404: raise
+ return packages
+
+def get_latest_released_fedora_source_package_list(d):
+ "Returns list of all the name os packages in the latest fedora distro"
+ latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
+ package_names = get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
+ package_names |= get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
+ return latest, package_names
+
+def get_latest_released_opensuse_source_package_list(d):
+ "Returns list of all the name os packages in the latest opensuse distro"
+ latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/leap", d)
+
+ package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/leap/%s/repo/oss/suse/src/" % latest, "main", d)
+ package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/leap/%s/oss/src/" % latest, "updates", d)
+ return latest, package_names
+
+def get_latest_released_clear_source_package_list(d):
+ latest = find_latest_numeric_release("https://download.clearlinux.org/releases/", d)
+ package_names = get_source_package_list_from_url("https://download.clearlinux.org/releases/%s/clear/source/SRPMS/" % latest, "main", d)
+ return latest, package_names
+
+def find_latest_debian_release(url, d):
+ "Find the latest listed debian release on the given url"
+
+ releases = [link.replace("Debian", "")
+ for link in get_links_from_url(url, d)
+ if link.startswith("Debian")]
+ releases.sort()
+ try:
+ return releases[-1]
+    except IndexError:
+ return "_NotFound_"
+
+def get_debian_style_source_package_list(url, section, d):
+ "Return the list of package-names stored in the debian style Sources.gz file"
+ import gzip
+
+ package_names = set()
+ for line in gzip.open(create_socket(url, d), mode="rt"):
+ if line.startswith("Package:"):
+ pkg = line.split(":", 1)[1].strip()
+ package_names.add(pkg + ":" + section)
+ return package_names
+
+def get_latest_released_debian_source_package_list(d):
+ "Returns list of all the name of packages in the latest debian distro"
+ latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
+ url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
+ package_names = get_debian_style_source_package_list(url, "main", d)
+ url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
+ package_names |= get_debian_style_source_package_list(url, "updates", d)
+ return latest, package_names
+
+def find_latest_ubuntu_release(url, d):
+ """
+ Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.
+
+ To avoid matching development releases look for distributions that have
+ updates, so the resulting distro could be any supported release.
+ """
+ url += "?C=M;O=D" # Descending Sort by Last Modified
+ for link in get_links_from_url(url, d):
+ if "-updates" in link:
+ distro = link.replace("-updates", "")
+ return distro
+ return "_NotFound_"
+
+def get_latest_released_ubuntu_source_package_list(d):
+ "Returns list of all the name os packages in the latest ubuntu distro"
+ latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
+ url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
+ package_names = get_debian_style_source_package_list(url, "main", d)
+ url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
+ package_names |= get_debian_style_source_package_list(url, "updates", d)
+ return latest, package_names
+
+def create_distro_packages_list(distro_check_dir, d):
+ import shutil
+
+ pkglst_dir = os.path.join(distro_check_dir, "package_lists")
+ bb.utils.remove(pkglst_dir, True)
+ bb.utils.mkdirhier(pkglst_dir)
+
+ per_distro_functions = (
+ ("Debian", get_latest_released_debian_source_package_list),
+ ("Ubuntu", get_latest_released_ubuntu_source_package_list),
+ ("Fedora", get_latest_released_fedora_source_package_list),
+ ("openSUSE", get_latest_released_opensuse_source_package_list),
+ ("Clear", get_latest_released_clear_source_package_list),
+ )
+
+ for name, fetcher_func in per_distro_functions:
+ try:
+ release, package_list = fetcher_func(d)
+        except Exception as e:
+            bb.warn("Cannot fetch packages for %s: %s" % (name, e))
+            continue
+ bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
+ if len(package_list) == 0:
+ bb.error("Didn't fetch any packages for %s %s" % (name, release))
+
+ package_list_file = os.path.join(pkglst_dir, name + "-" + release)
+ with open(package_list_file, 'w') as f:
+ for pkg in sorted(package_list):
+ f.write(pkg + "\n")
+
+def update_distro_data(distro_check_dir, datetime, d):
+ """
+    If the distro packages list data is old then rebuild it.
+    The operation has to be protected by a lock so that
+    only one thread performs it at a time.
+ """
+    if not os.path.isdir(distro_check_dir):
+        try:
+            bb.note("Making new directory: %s" % distro_check_dir)
+            os.makedirs(distro_check_dir)
+        except OSError:
+            raise Exception('Unable to create directory %s' % (distro_check_dir))
+
+
+ datetime_file = os.path.join(distro_check_dir, "build_datetime")
+ saved_datetime = "_invalid_"
+    import fcntl
+    f = None
+    try:
+ if not os.path.exists(datetime_file):
+ open(datetime_file, 'w+').close() # touch the file so that the next open won't fail
+
+ f = open(datetime_file, "r+")
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ saved_datetime = f.read()
+ if saved_datetime[0:8] != datetime[0:8]:
+ bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
+ bb.note("Regenerating distro package lists")
+ create_distro_packages_list(distro_check_dir, d)
+ f.seek(0)
+ f.write(datetime)
+
+ except OSError as e:
+ raise Exception('Unable to open timestamp: %s' % e)
+    finally:
+        if f:
+            fcntl.lockf(f, fcntl.LOCK_UN)
+            f.close()
+
+def compare_in_distro_packages_list(distro_check_dir, d):
+ if not os.path.isdir(distro_check_dir):
+ raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
+
+ localdata = bb.data.createCopy(d)
+ pkglst_dir = os.path.join(distro_check_dir, "package_lists")
+ matching_distros = []
+ pn = recipe_name = d.getVar('PN')
+ bb.note("Checking: %s" % pn)
+
+ if pn.find("-native") != -1:
+ pnstripped = pn.split("-native")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
+ recipe_name = pnstripped[0]
+
+ if pn.startswith("nativesdk-"):
+ pnstripped = pn.split("nativesdk-")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
+ recipe_name = pnstripped[1]
+
+ if pn.find("-cross") != -1:
+ pnstripped = pn.split("-cross")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
+ recipe_name = pnstripped[0]
+
+ if pn.find("-initial") != -1:
+ pnstripped = pn.split("-initial")
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
+ recipe_name = pnstripped[0]
+
+ bb.note("Recipe: %s" % recipe_name)
+
+    distro_exceptions = {"OE-Core": 'OE-Core', "OpenedHand": 'OpenedHand', "Intel": 'Intel', "Upstream": 'Upstream', "Windriver": 'Windriver', "OSPDT": 'OSPDT Approved', "Poky": 'poky'}
+ tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
+    for item in tmp.split():
+        if item and "=" not in item and distro_exceptions.get(item):
+            matching_distros.append(item)
+
+ distro_pn_aliases = {}
+    for item in tmp.split():
+        if "=" in item:
+            (dist, pn_alias) = item.split('=')
+            distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
+
+    for file in os.listdir(pkglst_dir):
+        (distro, distro_release) = file.split("-")
+        with open(os.path.join(pkglst_dir, file), "r") as f:
+            for line in f:
+                (pkg, section) = line.split(":")
+                if distro.lower() in distro_pn_aliases:
+                    pn = distro_pn_aliases[distro.lower()]
+                else:
+                    pn = recipe_name
+                if pn == pkg:
+                    matching_distros.append(distro + "-" + section[:-1]) # strip the trailing \n
+                    break
+
+ for item in tmp.split():
+ matching_distros.append(item)
+ bb.note("Matching: %s" % matching_distros)
+ return matching_distros
+
+def create_log_file(d, logname):
+ logpath = d.getVar('LOG_DIR')
+ bb.utils.mkdirhier(logpath)
+ logfn, logsuffix = os.path.splitext(logname)
+ logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME'), logsuffix))
+ if not os.path.exists(logfile):
+ slogfile = os.path.join(logpath, logname)
+ if os.path.exists(slogfile):
+ os.remove(slogfile)
+ open(logfile, 'w+').close()
+ os.symlink(logfile, slogfile)
+ d.setVar('LOG_FILE', logfile)
+ return logfile
+
+
+def save_distro_check_result(result, datetime, result_file, d):
+ pn = d.getVar('PN')
+ logdir = d.getVar('LOG_DIR')
+ if not logdir:
+ bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
+ return
+ bb.utils.mkdirhier(logdir)
+
+ line = pn
+ for i in result:
+ line = line + "," + i
+ f = open(result_file, "a")
+ import fcntl
+ fcntl.lockf(f, fcntl.LOCK_EX)
+ f.seek(0, os.SEEK_END) # seek to the end of file
+ f.write(line + "\n")
+ fcntl.lockf(f, fcntl.LOCK_UN)
+ f.close()
diff --git a/poky/meta/lib/oe/gpg_sign.py b/poky/meta/lib/oe/gpg_sign.py
new file mode 100644
index 000000000..b17272928
--- /dev/null
+++ b/poky/meta/lib/oe/gpg_sign.py
@@ -0,0 +1,128 @@
+"""Helper module for GPG signing"""
+import os
+
+import bb
+import oe.utils
+
+class LocalSigner(object):
+ """Class for handling local (on the build host) signing"""
+ def __init__(self, d):
+ self.gpg_bin = d.getVar('GPG_BIN') or \
+ bb.utils.which(os.getenv('PATH'), 'gpg')
+ self.gpg_path = d.getVar('GPG_PATH')
+ self.gpg_version = self.get_gpg_version()
+ self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
+ self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
+
+ def export_pubkey(self, output_file, keyid, armor=True):
+ """Export GPG public key to a file"""
+ cmd = '%s --no-permission-warning --batch --yes --export -o %s ' % \
+ (self.gpg_bin, output_file)
+ if self.gpg_path:
+ cmd += "--homedir %s " % self.gpg_path
+ if armor:
+ cmd += "--armor "
+ cmd += keyid
+ status, output = oe.utils.getstatusoutput(cmd)
+ if status:
+ raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
+ (keyid, output))
+
+ def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
+ """Sign RPM files"""
+
+ cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid
+ gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
+ if self.gpg_version > (2,1,):
+ gpg_args += ' --pinentry-mode=loopback'
+ cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
+ cmd += "--define '_binary_filedigest_algorithm %s' " % digest
+ if self.gpg_bin:
+ cmd += "--define '__gpg %s' " % self.gpg_bin
+ if self.gpg_path:
+ cmd += "--define '_gpg_path %s' " % self.gpg_path
+ if fsk:
+ cmd += "--signfiles --fskpath %s " % fsk
+ if fsk_password:
+ cmd += "--define '_file_signing_key_password %s' " % fsk_password
+
+ # Sign in chunks
+ for i in range(0, len(files), sign_chunk):
+ status, output = oe.utils.getstatusoutput(cmd + ' '.join(files[i:i+sign_chunk]))
+ if status:
+ raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
+
+ def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
+ """Create a detached signature of a file"""
+ import subprocess
+
+ if passphrase_file and passphrase:
+ raise Exception("You should use either passphrase_file of passphrase, not both")
+
+ cmd = [self.gpg_bin, '--detach-sign', '--no-permission-warning', '--batch',
+ '--no-tty', '--yes', '--passphrase-fd', '0', '-u', keyid]
+
+ if self.gpg_path:
+ cmd += ['--homedir', self.gpg_path]
+ if armor:
+ cmd += ['--armor']
+
+ #gpg > 2.1 supports password pipes only through the loopback interface
+ #gpg < 2.1 errors out if given unknown parameters
+ if self.gpg_version > (2,1,):
+ cmd += ['--pinentry-mode', 'loopback']
+
+ if self.gpg_agent_bin:
+ cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
+
+ cmd += [input_file]
+
+ try:
+ if passphrase_file:
+ with open(passphrase_file) as fobj:
+                    passphrase = fobj.readline()
+
+ job = subprocess.Popen(cmd, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ (_, stderr) = job.communicate(passphrase.encode("utf-8"))
+
+ if job.returncode:
+ raise bb.build.FuncFailed("GPG exited with code %d: %s" %
+ (job.returncode, stderr.decode("utf-8")))
+
+ except IOError as e:
+ bb.error("IO error (%s): %s" % (e.errno, e.strerror))
+ raise Exception("Failed to sign '%s'" % input_file)
+
+ except OSError as e:
+ bb.error("OS error (%s): %s" % (e.errno, e.strerror))
+ raise Exception("Failed to sign '%s" % input_file)
+
+
+ def get_gpg_version(self):
+ """Return the gpg version as a tuple of ints"""
+ import subprocess
+ try:
+ ver_str = subprocess.check_output((self.gpg_bin, "--version", "--no-permission-warning")).split()[2].decode("utf-8")
+ return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
+ except subprocess.CalledProcessError as e:
+ raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
+
+
+ def verify(self, sig_file):
+ """Verify signature"""
+ cmd = self.gpg_bin + " --verify --no-permission-warning "
+ if self.gpg_path:
+ cmd += "--homedir %s " % self.gpg_path
+ cmd += sig_file
+ status, _ = oe.utils.getstatusoutput(cmd)
+ ret = False if status else True
+ return ret
+
+
+def get_signer(d, backend):
+ """Get signer object for the specified backend"""
+ # Use local signing by default
+ if backend == 'local':
+ return LocalSigner(d)
+ else:
+ bb.fatal("Unsupported signing backend '%s'" % backend)
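+
+# Typical use is a sketch like the following (paths and key id illustrative):
+#
+#   signer = get_signer(d, 'local')
+#   signer.detach_sign('/path/to/Packages', keyid, passphrase_file)
+#   signer.verify('/path/to/Packages.asc')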
diff --git a/poky/meta/lib/oe/license.py b/poky/meta/lib/oe/license.py
new file mode 100644
index 000000000..ca385d518
--- /dev/null
+++ b/poky/meta/lib/oe/license.py
@@ -0,0 +1,243 @@
+# vi:sts=4:sw=4:et
+"""Code for parsing OpenEmbedded license strings"""
+
+import ast
+import re
+from fnmatch import fnmatchcase as fnmatch
+
+def license_ok(license, dont_want_licenses):
+ """ Return False if License exist in dont_want_licenses else True """
+ for dwl in dont_want_licenses:
+ # If you want to exclude license named generically 'X', we
+ # surely want to exclude 'X+' as well. In consequence, we
+ # will exclude a trailing '+' character from LICENSE in
+ # case INCOMPATIBLE_LICENSE is not a 'X+' license.
+ lic = license
+        if not re.search(r'\+$', dwl):
+            lic = re.sub(r'\+', '', license)
+ if fnmatch(lic, dwl):
+ return False
+ return True
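+
+# For example:
+#
+#   license_ok('GPLv3',  ['GPLv3'])   # -> False (exact match)
+#   license_ok('GPLv3+', ['GPLv3'])   # -> False (trailing '+' stripped first)
+#   license_ok('GPLv3',  ['GPLv3+'])  # -> True  (only the '+' form is excluded)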
+
+class LicenseError(Exception):
+ pass
+
+class LicenseSyntaxError(LicenseError):
+ def __init__(self, licensestr, exc):
+ self.licensestr = licensestr
+ self.exc = exc
+ LicenseError.__init__(self)
+
+ def __str__(self):
+ return "error in '%s': %s" % (self.licensestr, self.exc)
+
+class InvalidLicense(LicenseError):
+ def __init__(self, license):
+ self.license = license
+ LicenseError.__init__(self)
+
+ def __str__(self):
+ return "invalid characters in license '%s'" % self.license
+
+license_operator_chars = '&|() '
+license_operator = re.compile('([' + license_operator_chars + '])')
+license_pattern = re.compile(r'[a-zA-Z0-9.+_\-]+$')
+
+class LicenseVisitor(ast.NodeVisitor):
+ """Get elements based on OpenEmbedded license strings"""
+ def get_elements(self, licensestr):
+ new_elements = []
+        elements = [x for x in license_operator.split(licensestr) if x.strip()]
+ for pos, element in enumerate(elements):
+ if license_pattern.match(element):
+ if pos > 0 and license_pattern.match(elements[pos-1]):
+ new_elements.append('&')
+ element = '"' + element + '"'
+ elif not license_operator.match(element):
+ raise InvalidLicense(element)
+ new_elements.append(element)
+
+ return new_elements
+
+ """Syntax tree visitor which can accept elements previously generated with
+ OpenEmbedded license string"""
+ def visit_elements(self, elements):
+ self.visit(ast.parse(' '.join(elements)))
+
+ """Syntax tree visitor which can accept OpenEmbedded license strings"""
+ def visit_string(self, licensestr):
+ self.visit_elements(self.get_elements(licensestr))
+
+class FlattenVisitor(LicenseVisitor):
+ """Flatten a license tree (parsed from a string) by selecting one of each
+ set of OR options, in the way the user specifies"""
+ def __init__(self, choose_licenses):
+ self.choose_licenses = choose_licenses
+ self.licenses = []
+ LicenseVisitor.__init__(self)
+
+ def visit_Str(self, node):
+ self.licenses.append(node.s)
+
+ def visit_BinOp(self, node):
+ if isinstance(node.op, ast.BitOr):
+ left = FlattenVisitor(self.choose_licenses)
+ left.visit(node.left)
+
+ right = FlattenVisitor(self.choose_licenses)
+ right.visit(node.right)
+
+ selected = self.choose_licenses(left.licenses, right.licenses)
+ self.licenses.extend(selected)
+ else:
+ self.generic_visit(node)
+
+def flattened_licenses(licensestr, choose_licenses):
+ """Given a license string and choose_licenses function, return a flat list of licenses"""
+ flatten = FlattenVisitor(choose_licenses)
+ try:
+ flatten.visit_string(licensestr)
+ except SyntaxError as exc:
+ raise LicenseSyntaxError(licensestr, exc)
+ return flatten.licenses
+
+def is_included(licensestr, whitelist=None, blacklist=None):
+ """Given a license string and whitelist and blacklist, determine if the
+ license string matches the whitelist and does not match the blacklist.
+
+ Returns a tuple holding the boolean state and a list of the applicable
+ licenses that were excluded if state is False, or the licenses that were
+ included if the state is True.
+ """
+
+ def include_license(license):
+ return any(fnmatch(license, pattern) for pattern in whitelist)
+
+ def exclude_license(license):
+ return any(fnmatch(license, pattern) for pattern in blacklist)
+
+ def choose_licenses(alpha, beta):
+ """Select the option in an OR which is the 'best' (has the most
+ included licenses and no excluded licenses)."""
+        # The factor 1000 below is arbitrary, just expected to be much larger
+        # than the number of licenses actually specified. That way the weight
+ # will be negative if the list of licenses contains an excluded license,
+ # but still gives a higher weight to the list with the most included
+ # licenses.
+ alpha_weight = (len(list(filter(include_license, alpha))) -
+ 1000 * (len(list(filter(exclude_license, alpha))) > 0))
+ beta_weight = (len(list(filter(include_license, beta))) -
+ 1000 * (len(list(filter(exclude_license, beta))) > 0))
+ if alpha_weight >= beta_weight:
+ return alpha
+ else:
+ return beta
+
+ if not whitelist:
+ whitelist = ['*']
+
+ if not blacklist:
+ blacklist = []
+
+ licenses = flattened_licenses(licensestr, choose_licenses)
+ excluded = [lic for lic in licenses if exclude_license(lic)]
+ included = [lic for lic in licenses if include_license(lic)]
+ if excluded:
+ return False, excluded
+ else:
+ return True, included
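+
+# For example, an OR branch containing a blacklisted license loses out:
+#
+#   is_included('GPLv3 | MIT', whitelist=['*'], blacklist=['GPLv3'])
+#   # -> (True, ['MIT'])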
+
+class ManifestVisitor(LicenseVisitor):
+ """Walk license tree (parsed from a string) removing the incompatible
+ licenses specified"""
+ def __init__(self, dont_want_licenses, canonical_license, d):
+ self._dont_want_licenses = dont_want_licenses
+ self._canonical_license = canonical_license
+ self._d = d
+ self._operators = []
+
+ self.licenses = []
+ self.licensestr = ''
+
+ LicenseVisitor.__init__(self)
+
+ def visit(self, node):
+ if isinstance(node, ast.Str):
+ lic = node.s
+
+ if license_ok(self._canonical_license(self._d, lic),
+ self._dont_want_licenses) == True:
+ if self._operators:
+ ops = []
+ for op in self._operators:
+ if op == '[':
+ ops.append(op)
+ elif op == ']':
+ ops.append(op)
+ else:
+ if not ops:
+ ops.append(op)
+ elif ops[-1] in ['[', ']']:
+ ops.append(op)
+ else:
+ ops[-1] = op
+
+ for op in ops:
+ if op == '[' or op == ']':
+ self.licensestr += op
+ elif self.licenses:
+ self.licensestr += ' ' + op + ' '
+
+ self._operators = []
+
+ self.licensestr += lic
+ self.licenses.append(lic)
+ elif isinstance(node, ast.BitAnd):
+ self._operators.append("&")
+ elif isinstance(node, ast.BitOr):
+ self._operators.append("|")
+ elif isinstance(node, ast.List):
+ self._operators.append("[")
+ elif isinstance(node, ast.Load):
+ self.licensestr += "]"
+
+ self.generic_visit(node)
+
+def manifest_licenses(licensestr, dont_want_licenses, canonical_license, d):
+ """Given a license string and dont_want_licenses list,
+ return license string filtered and a list of licenses"""
+ manifest = ManifestVisitor(dont_want_licenses, canonical_license, d)
+
+ try:
+ elements = manifest.get_elements(licensestr)
+
+        # Replace '(' and ')' with '[' and ']' so ast parses them as List and Load nodes.
+ elements = ['[' if e == '(' else e for e in elements]
+ elements = [']' if e == ')' else e for e in elements]
+
+ manifest.visit_elements(elements)
+ except SyntaxError as exc:
+ raise LicenseSyntaxError(licensestr, exc)
+
+    # Replace '[' and ']' back with '(' and ')' to output the correct license string.
+ manifest.licensestr = manifest.licensestr.replace('[', '(').replace(']', ')')
+
+ return (manifest.licensestr, manifest.licenses)
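+
+# For example, with an identity canonicalisation hook (illustrative):
+#
+#   manifest_licenses('GPLv2 | MIT', ['GPLv2'], lambda d, lic: lic, None)
+#   # -> ('MIT', ['MIT'])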
+
+class ListVisitor(LicenseVisitor):
+ """Record all different licenses found in the license string"""
+ def __init__(self):
+ self.licenses = set()
+
+ def visit_Str(self, node):
+ self.licenses.add(node.s)
+
+def list_licenses(licensestr):
+ """Simply get a list of all licenses mentioned in a license string.
+ Binary operators are not applied or taken into account in any way"""
+ visitor = ListVisitor()
+ try:
+ visitor.visit_string(licensestr)
+ except SyntaxError as exc:
+ raise LicenseSyntaxError(licensestr, exc)
+ return visitor.licenses
diff --git a/poky/meta/lib/oe/lsb.py b/poky/meta/lib/oe/lsb.py
new file mode 100644
index 000000000..71c0992c5
--- /dev/null
+++ b/poky/meta/lib/oe/lsb.py
@@ -0,0 +1,117 @@
+import os
+
+import bb
+
+def get_os_release():
+ """Get all key-value pairs from /etc/os-release as a dict"""
+ from collections import OrderedDict
+
+ data = OrderedDict()
+ if os.path.exists('/etc/os-release'):
+ with open('/etc/os-release') as f:
+ for line in f:
+ try:
+ key, val = line.rstrip().split('=', 1)
+ except ValueError:
+ continue
+ data[key.strip()] = val.strip('"')
+ return data
+
+def release_dict_osr():
+ """ Populate a dict with pertinent values from /etc/os-release """
+ data = {}
+ os_release = get_os_release()
+ if 'ID' in os_release:
+ data['DISTRIB_ID'] = os_release['ID']
+ if 'VERSION_ID' in os_release:
+ data['DISTRIB_RELEASE'] = os_release['VERSION_ID']
+
+ return data
+
+def release_dict_lsb():
+ """ Return the output of lsb_release -ir as a dictionary """
+ from subprocess import PIPE
+
+ try:
+ output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
+    except bb.process.CmdError:
+ return {}
+
+ lsb_map = { 'Distributor ID': 'DISTRIB_ID',
+ 'Release': 'DISTRIB_RELEASE'}
+ lsb_keys = lsb_map.keys()
+
+ data = {}
+ for line in output.splitlines():
+ if line.startswith("-e"):
+ line = line[3:]
+ try:
+ key, value = line.split(":\t", 1)
+ except ValueError:
+ continue
+ if key in lsb_keys:
+ data[lsb_map[key]] = value
+
+    if len(data) != 2:
+        return {}
+
+ return data
+
+def release_dict_file():
+ """ Try to gather release information manually when other methods fail """
+ data = {}
+ try:
+ if os.path.exists('/etc/lsb-release'):
+ data = {}
+ with open('/etc/lsb-release') as f:
+ for line in f:
+ key, value = line.split("=", 1)
+ data[key] = value.strip()
+ elif os.path.exists('/etc/redhat-release'):
+ data = {}
+ with open('/etc/redhat-release') as f:
+ distro = f.readline().strip()
+ import re
+ match = re.match(r'(.*) release (.*) \((.*)\)', distro)
+ if match:
+ data['DISTRIB_ID'] = match.group(1)
+ data['DISTRIB_RELEASE'] = match.group(2)
+ elif os.path.exists('/etc/SuSE-release'):
+ data = {}
+ data['DISTRIB_ID'] = 'SUSE LINUX'
+ with open('/etc/SuSE-release') as f:
+ for line in f:
+ if line.startswith('VERSION = '):
+ data['DISTRIB_RELEASE'] = line[10:].rstrip()
+ break
+
+ except IOError:
+ return {}
+ return data
+
+def distro_identifier(adjust_hook=None):
+ """Return a distro identifier string based upon lsb_release -ri,
+ with optional adjustment via a hook"""
+
+ import re
+
+ # Try /etc/os-release first, then the output of `lsb_release -ir` and
+ # finally fall back on parsing various release files in order to determine
+ # host distro name and version.
+ distro_data = release_dict_osr()
+ if not distro_data:
+ distro_data = release_dict_lsb()
+ if not distro_data:
+ distro_data = release_dict_file()
+
+ distro_id = distro_data.get('DISTRIB_ID', '')
+ release = distro_data.get('DISTRIB_RELEASE', '')
+
+ if adjust_hook:
+ distro_id, release = adjust_hook(distro_id, release)
+ if not distro_id:
+ return "Unknown"
+ # Filter out any non-alphanumerics
+ distro_id = re.sub(r'\W', '', distro_id)
+
+ if release:
+ id_str = '{0}-{1}'.format(distro_id.lower(), release)
+ else:
+ id_str = distro_id
+ return id_str.replace(' ','-').replace('/','-')
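+
+# Sketch of the resulting identifier (actual values depend on the host):
+#
+#   distro_identifier()                      # e.g. 'ubuntu-16.04'
+#   distro_identifier(lambda i, r: (i, ''))  # e.g. 'Ubuntu' (release dropped)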
diff --git a/poky/meta/lib/oe/maketype.py b/poky/meta/lib/oe/maketype.py
new file mode 100644
index 000000000..f88981dd9
--- /dev/null
+++ b/poky/meta/lib/oe/maketype.py
@@ -0,0 +1,102 @@
+"""OpenEmbedded variable typing support
+
+Types are defined in the metadata by name, using the 'type' flag on a
+variable. Other flags may be utilized in the construction of the types. See
+the arguments of the type's factory for details.
+"""
+
+import inspect
+import oe.types as types
+
+available_types = {}
+
+class MissingFlag(TypeError):
+ """A particular flag is required to construct the type, but has not been
+ provided."""
+ def __init__(self, flag, type):
+ self.flag = flag
+ self.type = type
+ TypeError.__init__(self)
+
+ def __str__(self):
+ return "Type '%s' requires flag '%s'" % (self.type, self.flag)
+
+def factory(var_type):
+ """Return the factory for a specified type."""
+ if var_type is None:
+ raise TypeError("No type specified. Valid types: %s" %
+ ', '.join(available_types))
+ try:
+ return available_types[var_type]
+ except KeyError:
+ raise TypeError("Invalid type '%s':\n Valid types: %s" %
+ (var_type, ', '.join(available_types)))
+
+def create(value, var_type, **flags):
+ """Create an object of the specified type, given the specified flags and
+ string value."""
+ obj = factory(var_type)
+ objflags = {}
+ for flag in obj.flags:
+ if flag not in flags:
+ if flag not in obj.optflags:
+ raise MissingFlag(flag, var_type)
+ else:
+ objflags[flag] = flags[flag]
+
+ return obj(value, **objflags)
+
+def get_callable_args(obj):
+ """Grab all but the first argument of the specified callable, returning
+ the list, as well as a list of which of the arguments have default
+ values."""
+ if type(obj) is type:
+ obj = obj.__init__
+
+ sig = inspect.signature(obj)
+ args = list(sig.parameters.keys())
+ defaults = list(s for s in sig.parameters.keys() if sig.parameters[s].default != inspect.Parameter.empty)
+ flaglist = []
+ if args:
+ if len(args) > 1 and args[0] == 'self':
+ args = args[1:]
+ flaglist.extend(args)
+
+ optional = set()
+ if defaults:
+ optional |= set(flaglist[-len(defaults):])
+ return flaglist, optional
+
+def factory_setup(name, obj):
+ """Prepare a factory for use."""
+ args, optional = get_callable_args(obj)
+ extra_args = args[1:]
+ if extra_args:
+        obj.flags = extra_args
+ obj.optflags = set(optional)
+ else:
+ obj.flags = obj.optflags = ()
+
+ if not hasattr(obj, 'name'):
+ obj.name = name
+
+def register(name, factory):
+ """Register a type, given its name and a factory callable.
+
+ Determines the required and optional flags from the factory's
+ arguments."""
+ factory_setup(name, factory)
+ available_types[factory.name] = factory
+
+
+# Register all our included types
+for name in dir(types):
+ if name.startswith('_'):
+ continue
+
+ obj = getattr(types, name)
+    if not callable(obj):
+ continue
+
+ register(name, obj)
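+
+# With the factories from oe.types registered above, creation looks like the
+# following sketch ('boolean' and 'list' are among the registered names):
+#
+#   create('1', 'boolean')   # -> True
+#   create('a b c', 'list')  # -> ['a', 'b', 'c']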
diff --git a/poky/meta/lib/oe/manifest.py b/poky/meta/lib/oe/manifest.py
new file mode 100644
index 000000000..674303c86
--- /dev/null
+++ b/poky/meta/lib/oe/manifest.py
@@ -0,0 +1,344 @@
+from abc import ABCMeta, abstractmethod
+import os
+import re
+import bb
+
+
+class Manifest(object, metaclass=ABCMeta):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+
+ PKG_TYPE_MUST_INSTALL = "mip"
+ PKG_TYPE_MULTILIB = "mlp"
+ PKG_TYPE_LANGUAGE = "lgp"
+ PKG_TYPE_ATTEMPT_ONLY = "aop"
+
+ MANIFEST_TYPE_IMAGE = "image"
+ MANIFEST_TYPE_SDK_HOST = "sdk_host"
+ MANIFEST_TYPE_SDK_TARGET = "sdk_target"
+
+ var_maps = {
+ MANIFEST_TYPE_IMAGE: {
+ "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
+ "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
+ "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
+ },
+ MANIFEST_TYPE_SDK_HOST: {
+ "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
+ "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
+ },
+ MANIFEST_TYPE_SDK_TARGET: {
+ "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
+ "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
+ }
+ }
+
+ INSTALL_ORDER = [
+ PKG_TYPE_LANGUAGE,
+ PKG_TYPE_MUST_INSTALL,
+ PKG_TYPE_ATTEMPT_ONLY,
+ PKG_TYPE_MULTILIB
+ ]
+
+ initial_manifest_file_header = \
+ "# This file was generated automatically and contains the packages\n" \
+ "# passed on to the package manager in order to create the rootfs.\n\n" \
+ "# Format:\n" \
+ "# <package_type>,<package_name>\n" \
+ "# where:\n" \
+ "# <package_type> can be:\n" \
+ "# 'mip' = must install package\n" \
+ "# 'aop' = attempt only package\n" \
+ "# 'mlp' = multilib package\n" \
+ "# 'lgp' = language package\n\n"
+
+ def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
+ self.d = d
+ self.manifest_type = manifest_type
+
+ if manifest_dir is None:
+ if manifest_type != self.MANIFEST_TYPE_IMAGE:
+ self.manifest_dir = self.d.getVar('SDK_DIR')
+ else:
+ self.manifest_dir = self.d.getVar('WORKDIR')
+ else:
+ self.manifest_dir = manifest_dir
+
+ bb.utils.mkdirhier(self.manifest_dir)
+
+ self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
+ self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
+ self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
+
+ # packages in the following vars will be split in 'must install' and
+ # 'multilib'
+ self.vars_to_split = ["PACKAGE_INSTALL",
+ "TOOLCHAIN_HOST_TASK",
+ "TOOLCHAIN_TARGET_TASK"]
+
+ """
+ This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
+ This will be used for testing until the class is implemented properly!
+ """
+ def _create_dummy_initial(self):
+ image_rootfs = self.d.getVar('IMAGE_ROOTFS')
+ pkg_list = dict()
+ if image_rootfs.find("core-image-sato-sdk") > 0:
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
+ "packagegroup-core-x11-sato-games packagegroup-base-extended " \
+ "packagegroup-core-x11-sato packagegroup-core-x11-base " \
+ "packagegroup-core-sdk packagegroup-core-tools-debug " \
+ "packagegroup-core-boot packagegroup-core-tools-testapps " \
+ "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
+ "apt packagegroup-core-tools-profile psplash " \
+ "packagegroup-core-standalone-sdk-target " \
+ "packagegroup-core-ssh-openssh dpkg kernel-dev"
+ pkg_list[self.PKG_TYPE_LANGUAGE] = \
+ "locale-base-en-us locale-base-en-gb"
+ elif image_rootfs.find("core-image-sato") > 0:
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
+ "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
+ "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
+ "packagegroup-core-x11-sato packagegroup-core-boot"
+            pkg_list[self.PKG_TYPE_LANGUAGE] = \
+ "locale-base-en-us locale-base-en-gb"
+ elif image_rootfs.find("core-image-minimal") > 0:
+ pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for pkg_type in pkg_list:
+ for pkg in pkg_list[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ """
+ This will create the initial manifest which will be used by Rootfs class to
+ generate the rootfs
+ """
+ @abstractmethod
+ def create_initial(self):
+ pass
+
+ """
+ This creates the manifest after everything has been installed.
+ """
+ @abstractmethod
+ def create_final(self):
+ pass
+
+ """
+ This creates the manifest after the package in initial manifest has been
+ dummy installed. It lists all *to be installed* packages. There is no real
+ installation, just a test.
+ """
+ @abstractmethod
+ def create_full(self, pm):
+ pass
+
+ """
+ The following function parses an initial manifest and returns a dictionary
+ object with the must install, attempt only, multilib and language packages.
+ """
+ def parse_initial_manifest(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest) as manifest:
+ for line in manifest.read().split('\n'):
+ comment = re.match("^#.*", line)
+ pattern = "^(%s|%s|%s|%s),(.*)$" % \
+ (self.PKG_TYPE_MUST_INSTALL,
+ self.PKG_TYPE_ATTEMPT_ONLY,
+ self.PKG_TYPE_MULTILIB,
+ self.PKG_TYPE_LANGUAGE)
+ pkg = re.match(pattern, line)
+
+ if comment is not None:
+ continue
+
+ if pkg is not None:
+ pkg_type = pkg.group(1)
+ pkg_name = pkg.group(2)
+
+                    if pkg_type not in pkgs:
+ pkgs[pkg_type] = [pkg_name]
+ else:
+ pkgs[pkg_type].append(pkg_name)
+
+ return pkgs
+
+    def parse_full_manifest(self):
+        '''
+        Parse a full manifest and return a list object with packages.
+        '''
+ installed_pkgs = list()
+ if not os.path.exists(self.full_manifest):
+            bb.note('full manifest does not exist')
+ return installed_pkgs
+
+ with open(self.full_manifest, 'r') as manifest:
+ for pkg in manifest.read().split('\n'):
+ installed_pkgs.append(pkg.strip())
+
+ return installed_pkgs
+
+
+class RpmManifest(Manifest):
+    def _split_multilib(self, pkg_list):
+        """
+        Return a dictionary object with mip and mlp packages.
+        """
+        pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+            if pkg_type not in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+ pkg_list = self.d.getVar(var)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
+
+ for pkg_type in pkgs:
+ for pkg in pkgs[pkg_type].split():
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
+
+
+class OpkgManifest(Manifest):
+    def _split_multilib(self, pkg_list):
+        """
+        Return a dictionary object with mip and mlp packages.
+        """
+        pkgs = dict()
+
+ for pkg in pkg_list.split():
+ pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+ ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
+
+ for ml_variant in ml_variants:
+ if pkg.startswith(ml_variant + '-'):
+ pkg_type = self.PKG_TYPE_MULTILIB
+
+            if pkg_type not in pkgs:
+ pkgs[pkg_type] = pkg
+ else:
+ pkgs[pkg_type] += " " + pkg
+
+ return pkgs
+
+ def create_initial(self):
+ pkgs = dict()
+
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ if var in self.vars_to_split:
+ split_pkgs = self._split_multilib(self.d.getVar(var))
+ if split_pkgs is not None:
+ pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
+ else:
+ pkg_list = self.d.getVar(var)
+ if pkg_list is not None:
+ pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
+
+ for pkg_type in sorted(pkgs):
+ for pkg in sorted(pkgs[pkg_type].split()):
+ manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ if not os.path.exists(self.initial_manifest):
+ self.create_initial()
+
+ initial_manifest = self.parse_initial_manifest()
+ pkgs_to_install = list()
+ for pkg_type in initial_manifest:
+ pkgs_to_install += initial_manifest[pkg_type]
+ if len(pkgs_to_install) == 0:
+ return
+
+ output = pm.dummy_install(pkgs_to_install)
+
+ with open(self.full_manifest, 'w+') as manifest:
+ pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
+ for line in set(output.split('\n')):
+ m = pkg_re.match(line)
+ if m:
+ manifest.write(m.group(1) + '\n')
+
+ return
+
+
+class DpkgManifest(Manifest):
+ def create_initial(self):
+ with open(self.initial_manifest, "w+") as manifest:
+ manifest.write(self.initial_manifest_file_header)
+
+ for var in self.var_maps[self.manifest_type]:
+ pkg_list = self.d.getVar(var)
+
+ if pkg_list is None:
+ continue
+
+ for pkg in pkg_list.split():
+ manifest.write("%s,%s\n" %
+ (self.var_maps[self.manifest_type][var], pkg))
+
+ def create_final(self):
+ pass
+
+ def create_full(self, pm):
+ pass
+
+
+def create_manifest(d, final_manifest=False, manifest_dir=None,
+ manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
+ manifest_map = {'rpm': RpmManifest,
+ 'ipk': OpkgManifest,
+ 'deb': DpkgManifest}
+
+ manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
+
+ if final_manifest:
+ manifest.create_final()
+ else:
+ manifest.create_initial()
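+
+# Typical invocation, as a sketch (manifest_dir defaults to WORKDIR or
+# SDK_DIR as selected in Manifest.__init__):
+#
+#   create_manifest(d, final_manifest=False)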
+
+
+if __name__ == "__main__":
+ pass
diff --git a/poky/meta/lib/oe/package.py b/poky/meta/lib/oe/package.py
new file mode 100644
index 000000000..4f3e21ad4
--- /dev/null
+++ b/poky/meta/lib/oe/package.py
@@ -0,0 +1,294 @@
+import os
+
+import bb
+
+def runstrip(arg):
+ # Function to strip a single file, called from split_and_strip_files below
+ # A working 'file' (one which works on the target architecture)
+ #
+ # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+ # us what type of file we're processing...
+ # 4 - executable
+ # 8 - shared library
+ # 16 - kernel module
+
+ import stat, subprocess
+
+ (file, elftype, strip) = arg
+
+ newmode = None
+    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
+
+ stripcmd = [strip]
+
+ # kernel module
+ if elftype & 16:
+ stripcmd.extend(["--strip-debug", "--remove-section=.comment",
+ "--remove-section=.note", "--preserve-dates"])
+ # .so and shared library
+ elif ".so" in file and elftype & 8:
+ stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
+ # shared or executable:
+ elif elftype & 8 or elftype & 4:
+ stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
+
+ stripcmd.append(file)
+ bb.debug(1, "runstrip: %s" % stripcmd)
+
+ try:
+ output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return
+
+
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=False):
+ """
+ Strip executable code (like executables, shared libraries) _in_place_
+ - Based on sysroot_strip in staging.bbclass
+ :param dstdir: directory in which to strip files
+ :param strip_cmd: Strip command (usually ${STRIP})
+ :param libdir: ${libdir} - strip .so files in this directory
+ :param base_libdir: ${base_libdir} - strip .so files in this directory
+    :param qa_already_stripped: Set to True if 'already-stripped' is in ${INSANE_SKIP}
+ This is for proper logging and messages only.
+ """
+ import stat, errno, oe.path, oe.utils, mmap
+
+ # Detect .ko module by searching for "vermagic=" string
+ def is_kernel_module(path):
+ with open(path) as f:
+ return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
+
+ # Return type (bits):
+ # 0 - not elf
+ # 1 - ELF
+ # 2 - stripped
+ # 4 - executable
+ # 8 - shared library
+ # 16 - kernel module
+ def is_elf(path):
+ exec_type = 0
+ ret, result = oe.utils.getstatusoutput("file -b '%s'" % path)
+
+ if ret:
+ bb.error("split_and_strip_files: 'file %s' failed" % path)
+ return exec_type
+
+ if "ELF" in result:
+ exec_type |= 1
+ if "not stripped" not in result:
+ exec_type |= 2
+ if "executable" in result:
+ exec_type |= 4
+ if "shared" in result:
+ exec_type |= 8
+ if "relocatable" in result and is_kernel_module(path):
+ exec_type |= 16
+ return exec_type
+
+ elffiles = {}
+ inodes = {}
+ libdir = os.path.abspath(dstdir + os.sep + libdir)
+ base_libdir = os.path.abspath(dstdir + os.sep + base_libdir)
+ exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+ #
+ # First lets figure out all of the files we may have to process
+ #
+ for root, dirs, files in os.walk(dstdir):
+ for f in files:
+ file = os.path.join(root, f)
+
+ try:
+ ltarget = oe.path.realpath(file, dstdir, False)
+ s = os.lstat(ltarget)
+ except OSError as e:
+ (err, strerror) = e.args
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ if not s:
+ continue
+            # Check it's an executable
+ if s[stat.ST_MODE] & exec_mask \
+ or ((file.startswith(libdir) or file.startswith(base_libdir)) and ".so" in f) \
+ or file.endswith('.ko'):
+                # Skip symlinks; the real file they point at is handled on its own
+ if os.path.islink(file):
+ continue
+
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ elf_file = is_elf(file)
+ if elf_file & 1:
+ if elf_file & 2:
+ if qa_already_stripped:
+ bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dstdir):], pn))
+ else:
+ bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
+ continue
+
+ if s.st_ino in inodes:
+ os.unlink(file)
+ os.link(inodes[s.st_ino], file)
+ else:
+ # break hardlinks so that we do not strip the original.
+ inodes[s.st_ino] = file
+ bb.utils.copyfile(file, file)
+ elffiles[file] = elf_file
+
+ #
+ # Now strip them (in parallel)
+ #
+ sfiles = []
+ for file in elffiles:
+ elf_file = int(elffiles[file])
+ sfiles.append((file, elf_file, strip_cmd))
+
+ oe.utils.multiprocess_exec(sfiles, runstrip)
+
+
+
+def file_translate(file):
+ ft = file.replace("@", "@at@")
+ ft = ft.replace(" ", "@space@")
+ ft = ft.replace("\t", "@tab@")
+ ft = ft.replace("[", "@openbrace@")
+ ft = ft.replace("]", "@closebrace@")
+ ft = ft.replace("_", "@underscore@")
+ return ft
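+
+# For example:
+#
+#   file_translate('/usr/lib/my_app [v1]')
+#   # -> '/usr/lib/my@underscore@app@space@@openbrace@v1@closebrace@'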
+
+def filedeprunner(arg):
+ import re, subprocess, shlex
+
+ (pkg, pkgfiles, rpmdeps, pkgdest) = arg
+ provides = {}
+ requires = {}
+
+ file_re = re.compile(r'\s+\d+\s(.*)')
+ dep_re = re.compile(r'\s+(\S)\s+(.*)')
+ r = re.compile(r'[<>=]+\s+\S*')
+
+ def process_deps(pipe, pkg, pkgdest, provides, requires):
+ file = None
+ for line in pipe.split("\n"):
+
+ m = file_re.match(line)
+ if m:
+ file = m.group(1)
+ file = file.replace(pkgdest + "/" + pkg, "")
+ file = file_translate(file)
+ continue
+
+ m = dep_re.match(line)
+ if not m or not file:
+ continue
+
+ type, dep = m.groups()
+
+ if type == 'R':
+ i = requires
+ elif type == 'P':
+ i = provides
+ else:
+ continue
+
+ if dep.startswith("python("):
+ continue
+
+ # Ignore all perl(VMS::...) and perl(Mac::...) dependencies. These
+ # are typically used conditionally from the Perl code, but are
+ # generated as unconditional dependencies.
+ if dep.startswith('perl(VMS::') or dep.startswith('perl(Mac::'):
+ continue
+
+ # Ignore perl dependencies on .pl files.
+ if dep.startswith('perl(') and dep.endswith('.pl)'):
+ continue
+
+ # Remove perl versions and perl module versions since they typically
+ # do not make sense when used as package versions.
+ if dep.startswith('perl') and r.search(dep):
+ dep = dep.split()[0]
+
+ # Put parentheses around any version specifications.
+ dep = r.sub(r'(\g<0>)',dep)
+
+ if file not in i:
+ i[file] = []
+ i[file].append(dep)
+
+ return provides, requires
+
+ output = subprocess.check_output(shlex.split(rpmdeps) + pkgfiles, stderr=subprocess.STDOUT).decode("utf-8")
+ provides, requires = process_deps(output, pkg, pkgdest, provides, requires)
+
+ return (pkg, provides, requires)
+
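+# Sketch of the rpmdeps output format this parser assumes (inferred from the
+# regexes above; lines invented for illustration):
+#   " 1 /usr/bin/foo"   -> the file currently being described
+#   "  R libc.so.6"     -> a requires ('R') entry for that file
+#   "  P libfoo.so.1"   -> a provides ('P') entry for that file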
+
+def read_shlib_providers(d):
+ import re
+
+ shlib_provider = {}
+ shlibs_dirs = d.getVar('SHLIBSDIRS').split()
+    list_re = re.compile(r'^(.*)\.list$')
+ # Go from least to most specific since the last one found wins
+ for dir in reversed(shlibs_dirs):
+ bb.debug(2, "Reading shlib providers in %s" % (dir))
+ if not os.path.exists(dir):
+ continue
+ for file in os.listdir(dir):
+ m = list_re.match(file)
+ if m:
+ dep_pkg = m.group(1)
+ try:
+ fd = open(os.path.join(dir, file))
+ except IOError:
+                        # During a build, unrelated shlib files may be deleted, so
+                        # handle files disappearing between the listdir and the open.
+ continue
+ lines = fd.readlines()
+ fd.close()
+ for l in lines:
+ s = l.strip().split(":")
+ if s[0] not in shlib_provider:
+ shlib_provider[s[0]] = {}
+ shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
+ return shlib_provider
+
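+# Illustrative sketch (the .list line format is assumed from the split above):
+# a line "libfoo.so.1:/usr/lib:1.2" in foo.list would record
+# shlib_provider["libfoo.so.1"]["/usr/lib"] = ("foo", "1.2").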
+
+def npm_split_package_dirs(pkgdir):
+ """
+ Work out the packages fetched and unpacked by BitBake's npm fetcher
+ Returns a dict of packagename -> (relpath, package.json) ordered
+ such that it is suitable for use in PACKAGES and FILES
+ """
+ from collections import OrderedDict
+ import json
+ packages = {}
+ for root, dirs, files in os.walk(pkgdir):
+ if os.path.basename(root) == 'node_modules':
+ for dn in dirs:
+ relpth = os.path.relpath(os.path.join(root, dn), pkgdir)
+ pkgitems = ['${PN}']
+ for pathitem in relpth.split('/'):
+ if pathitem == 'node_modules':
+ continue
+ pkgitems.append(pathitem)
+ pkgname = '-'.join(pkgitems).replace('_', '-')
+ pkgname = pkgname.replace('@', '')
+ pkgfile = os.path.join(root, dn, 'package.json')
+ data = None
+ if os.path.exists(pkgfile):
+ with open(pkgfile, 'r') as f:
+ data = json.loads(f.read())
+ packages[pkgname] = (relpth, data)
+ # We want the main package for a module sorted *after* its subpackages
+ # (so that it doesn't otherwise steal the files for the subpackage), so
+ # this is a cheap way to do that whilst still having an otherwise
+ # alphabetical sort
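+    # For example (illustrative keys): with '~' appended, '${PN}-foo~' sorts
+    # after '${PN}-foo-bar~' because '~' compares greater than '-', so a module
+    # lands after its own node_modules subpackages.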
+ return OrderedDict((key, packages[key]) for key in sorted(packages, key=lambda pkg: pkg + '~'))
diff --git a/poky/meta/lib/oe/package_manager.py b/poky/meta/lib/oe/package_manager.py
new file mode 100644
index 000000000..2d8aeba03
--- /dev/null
+++ b/poky/meta/lib/oe/package_manager.py
@@ -0,0 +1,1787 @@
+from abc import ABCMeta, abstractmethod
+import os
+import glob
+import subprocess
+import shutil
+import multiprocessing
+import re
+import collections
+import bb
+import tempfile
+import oe.utils
+import oe.path
+import string
+from oe.gpg_sign import get_signer
+import hashlib
+
+# this can be used by all PM backends to create the index files in parallel
+def create_index(arg):
+ index_cmd = arg
+
+ bb.note("Executing '%s' ..." % index_cmd)
+ result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ if result:
+ bb.note(result)
+
+def opkg_query(cmd_output):
+ """
+    This method parses the output from the package manager and returns
+    a dictionary with information about the packages. This is used
+    when the packages are in deb or ipk format.
+ """
+    verregex = re.compile(r' \([=<>]* [^ )]*\)')
+ output = dict()
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ pkgarch = ""
+ for line in cmd_output.splitlines():
+ line = line.rstrip()
+ if ':' in line:
+ if line.startswith("Package: "):
+ pkg = line.split(": ")[1]
+ elif line.startswith("Architecture: "):
+ arch = line.split(": ")[1]
+ elif line.startswith("Version: "):
+ ver = line.split(": ")[1]
+ elif line.startswith("File: ") or line.startswith("Filename:"):
+ filename = line.split(": ")[1]
+ if "/" in filename:
+ filename = os.path.basename(filename)
+ elif line.startswith("Depends: "):
+ depends = verregex.sub('', line.split(": ")[1])
+ for depend in depends.split(", "):
+ dep.append(depend)
+ elif line.startswith("Recommends: "):
+ recommends = verregex.sub('', line.split(": ")[1])
+ for recommend in recommends.split(", "):
+ dep.append("%s [REC]" % recommend)
+ elif line.startswith("PackageArch: "):
+ pkgarch = line.split(": ")[1]
+
+        # When there is a blank line, save the package information
+ elif not line:
+ # IPK doesn't include the filename
+ if not filename:
+ filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
+ if pkg:
+ output[pkg] = {"arch":arch, "ver":ver,
+ "filename":filename, "deps": dep, "pkgarch":pkgarch }
+ pkg = ""
+ arch = ""
+ ver = ""
+ filename = ""
+ dep = []
+ pkgarch = ""
+
+ if pkg:
+ if not filename:
+ filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
+        output[pkg] = {"arch":arch, "ver":ver,
+                "filename":filename, "deps": dep, "pkgarch":pkgarch }
+
+ return output
+
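+# Minimal usage sketch for opkg_query (field values invented):
+#   opkg_query("Package: foo\nArchitecture: all\nVersion: 1.0\n\n")
+# returns {'foo': {'arch': 'all', 'ver': '1.0', 'filename': 'foo_1.0_all.ipk',
+#                  'deps': [], 'pkgarch': ''}}
+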
+# Note: this should be bb.fatal in the future.
+def failed_postinsts_warn(pkgs, log_path):
+    bb.warn("""Intentionally failing postinstall scriptlets of %s to defer them to first boot is deprecated. Please place them into pkg_postinst_ontarget_${PN}().
+If deferring to first boot wasn't the intent, then scriptlet failure may mean an issue in the recipe, or a regression elsewhere.
+Details of the failure are in %s.""" %(pkgs, log_path))
+
+class Indexer(object, metaclass=ABCMeta):
+ def __init__(self, d, deploy_dir):
+ self.d = d
+ self.deploy_dir = deploy_dir
+
+ @abstractmethod
+ def write_index(self):
+ pass
+
+
+class RpmIndexer(Indexer):
+ def write_index(self):
+ self.do_write_index(self.deploy_dir)
+
+ def do_write_index(self, deploy_dir):
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
+ result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
+ if result:
+ bb.fatal(result)
+
+ # Sign repomd
+ if signer:
+ sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (sig_type.upper() != "BIN")
+ signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+class RpmSubdirIndexer(RpmIndexer):
+ def write_index(self):
+ bb.note("Generating package index for %s" %(self.deploy_dir))
+ self.do_write_index(self.deploy_dir)
+ for entry in os.walk(self.deploy_dir):
+ if os.path.samefile(self.deploy_dir, entry[0]):
+ for dir in entry[1]:
+ if dir != 'repodata':
+ dir_path = oe.path.join(self.deploy_dir, dir)
+ bb.note("Generating package index for %s" %(dir_path))
+ self.do_write_index(dir_path)
+
+class OpkgIndexer(Indexer):
+ def write_index(self):
+ arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
+ "SDK_PACKAGE_ARCHS",
+ ]
+
+ opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
+ else:
+ signer = None
+
+ if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
+ open(os.path.join(self.deploy_dir, "Packages"), "w").close()
+
+ index_cmds = set()
+ index_sign_files = set()
+ for arch_var in arch_vars:
+ archs = self.d.getVar(arch_var)
+ if archs is None:
+ continue
+
+ for arch in archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ pkgs_file = os.path.join(pkgs_dir, "Packages")
+
+ if not os.path.isdir(pkgs_dir):
+ continue
+
+ if not os.path.exists(pkgs_file):
+ open(pkgs_file, "w").close()
+
+ index_cmds.add('%s -r %s -p %s -m %s' %
+ (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
+
+ index_sign_files.add(pkgs_file)
+
+ if len(index_cmds) == 0:
+ bb.note("There are no packages in %s!" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_exec(index_cmds, create_index)
+
+ if signer:
+ feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
+ is_ascii_sig = (feed_sig_type.upper() != "BIN")
+ for f in index_sign_files:
+ signer.detach_sign(f,
+ self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+ self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
+ armor=is_ascii_sig)
+
+
+class DpkgIndexer(Indexer):
+ def _create_configs(self):
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
+ bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"),
+ "w") as prefs_file:
+ pass
+ with open(os.path.join(self.apt_conf_dir, "sources.list"),
+ "w+") as sources_file:
+ pass
+
+ with open(self.apt_conf_file, "w") as apt_conf:
+ with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
+ "apt", "apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+ line = re.sub("#ROOTFS#", "/dev/null", line)
+ line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ def write_index(self):
+ self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
+ "apt-ftparchive")
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self._create_configs()
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ pkg_archs = self.d.getVar('PACKAGE_ARCHS')
+ if pkg_archs is not None:
+ arch_list = pkg_archs.split()
+ sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
+ if sdk_pkg_archs is not None:
+ for a in sdk_pkg_archs.split():
+ if a not in pkg_archs:
+ arch_list.append(a)
+
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
+
+ apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
+ gzip = bb.utils.which(os.getenv('PATH'), "gzip")
+
+ index_cmds = []
+ deb_dirs_found = False
+ for arch in arch_list:
+ arch_dir = os.path.join(self.deploy_dir, arch)
+ if not os.path.isdir(arch_dir):
+ continue
+
+ cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
+
+ cmd += "%s -fcn Packages > Packages.gz;" % gzip
+
+ with open(os.path.join(arch_dir, "Release"), "w+") as release:
+ release.write("Label: %s\n" % arch)
+
+ cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
+
+ index_cmds.append(cmd)
+
+ deb_dirs_found = True
+
+ if not deb_dirs_found:
+ bb.note("There are no packages in %s" % self.deploy_dir)
+ return
+
+ oe.utils.multiprocess_exec(index_cmds, create_index)
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+            raise NotImplementedError('Package feed signing not implemented for dpkg')
+
+
+
+class PkgsList(object, metaclass=ABCMeta):
+ def __init__(self, d, rootfs_dir):
+ self.d = d
+ self.rootfs_dir = rootfs_dir
+
+ @abstractmethod
+ def list_pkgs(self):
+ pass
+
+class RpmPkgsList(PkgsList):
+ def list_pkgs(self):
+ return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR')).list_installed()
+
+class OpkgPkgsList(PkgsList):
+ def __init__(self, d, rootfs_dir, config_file):
+ super(OpkgPkgsList, self).__init__(d, rootfs_dir)
+
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ def list_pkgs(self, format=None):
+ cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
+
+ # opkg returns success even when it printed some
+ # "Collected errors:" report to stderr. Mixing stderr into
+ # stdout then leads to random failures later on when
+ # parsing the output. To avoid this we need to collect both
+ # output streams separately and check for empty stderr.
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+ cmd_output, cmd_stderr = p.communicate()
+ cmd_output = cmd_output.decode("utf-8")
+ cmd_stderr = cmd_stderr.decode("utf-8")
+ if p.returncode or cmd_stderr:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr))
+
+ return opkg_query(cmd_output)
+
+
+class DpkgPkgsList(PkgsList):
+
+ def list_pkgs(self):
+ cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
+ "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
+ "-W"]
+
+ cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
+
+ try:
+ cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get the installed packages list. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ return opkg_query(cmd_output)
+
+
+class PackageManager(object, metaclass=ABCMeta):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+
+ def __init__(self, d, target_rootfs):
+ self.d = d
+ self.target_rootfs = target_rootfs
+ self.deploy_dir = None
+ self.deploy_lock = None
+ self._initialize_intercepts()
+
+ def _initialize_intercepts(self):
+ bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+ postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
+ if not postinst_intercepts_dir:
+ postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+ # As there might be more than one instance of PackageManager operating at the same time
+ # we need to isolate the intercept_scripts directories from each other,
+        # hence the ugly hash digest in the dir name.
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
+ "intercept_scripts-%s" %(hashlib.sha256(self.target_rootfs.encode()).hexdigest()) )
+
+ bb.utils.remove(self.intercepts_dir, True)
+ shutil.copytree(postinst_intercepts_dir, self.intercepts_dir)
+
+ @abstractmethod
+ def _handle_intercept_failure(self, failed_script):
+ pass
+
+ def _postpone_to_first_boot(self, postinst_intercept_hook):
+ with open(postinst_intercept_hook) as intercept:
+ registered_pkgs = None
+ for line in intercept.read().split("\n"):
+ m = re.match("^##PKGS:(.*)", line)
+ if m is not None:
+ registered_pkgs = m.group(1).strip()
+ break
+
+ if registered_pkgs is not None:
+ bb.note("If an image is being built, the postinstalls for the following packages "
+ "will be postponed for first boot: %s" %
+ registered_pkgs)
+
+ # call the backend dependent handler
+ self._handle_intercept_failure(registered_pkgs)
+
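+    # Hook format assumed by the regex above (marker line invented for
+    # illustration): a delay_to_first_boot script carries a line such as
+    # "##PKGS: pkg-a pkg-b", registering those packages for deferred,
+    # first-boot postinstall handling.
+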
+
+ def run_intercepts(self):
+ intercepts_dir = self.intercepts_dir
+
+ bb.note("Running intercept scripts:")
+ os.environ['D'] = self.target_rootfs
+ os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+ for script in os.listdir(intercepts_dir):
+ script_full = os.path.join(intercepts_dir, script)
+
+ if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+ continue
+
+ if script == "delay_to_first_boot":
+ self._postpone_to_first_boot(script_full)
+ continue
+
+ bb.note("> Executing %s intercept ..." % script)
+
+ try:
+ output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+ if output: bb.note(output.decode("utf-8"))
+ except subprocess.CalledProcessError as e:
+ bb.warn("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+ self._postpone_to_first_boot(script_full)
+
+ @abstractmethod
+ def update(self):
+ """
+ Update the package manager package database.
+ """
+ pass
+
+ @abstractmethod
+ def install(self, pkgs, attempt_only=False):
+ """
+ Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+ True, installation failures are ignored.
+ """
+ pass
+
+ @abstractmethod
+ def remove(self, pkgs, with_dependencies=True):
+ """
+ Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+ is False, then any dependencies are left in place.
+ """
+ pass
+
+ @abstractmethod
+ def write_index(self):
+ """
+ This function creates the index files
+ """
+ pass
+
+ @abstractmethod
+ def remove_packaging_data(self):
+ pass
+
+ @abstractmethod
+ def list_installed(self):
+ pass
+
+ @abstractmethod
+ def extract(self, pkg):
+ """
+        Returns the path to a tmpdir where the contents of a package reside.
+        Deleting the tmpdir is the responsibility of the caller.
+ """
+ pass
+
+ @abstractmethod
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ """
+ Add remote package feeds into repository manager configuration. The parameters
+ for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+ See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+ for their description.
+ """
+ pass
+
+ def install_glob(self, globs, sdk=False):
+ """
+ Install all packages that match a glob.
+ """
+ # TODO don't have sdk here but have a property on the superclass
+ # (and respect in install_complementary)
+ if sdk:
+ pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
+ else:
+ pkgdatadir = self.d.getVar("PKGDATA_DIR")
+
+ try:
+ bb.note("Installing globbed packages...")
+ cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
+ pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ self.install(pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ # Return code 1 means no packages matched
+ if e.returncode != 1:
+ bb.fatal("Could not compute globbed packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def install_complementary(self, globs=None):
+ """
+ Install complementary packages based upon the list of currently installed
+        packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
+        these packages; if they don't exist, no error will occur. Note: every
+        backend needs to call this function explicitly after the normal package
+        installation.
+ """
+ if globs is None:
+ globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
+ split_linguas = set()
+
+ for translation in self.d.getVar('IMAGE_LINGUAS').split():
+ split_linguas.add(translation)
+ split_linguas.add(translation.split('-')[0])
+
+ split_linguas = sorted(split_linguas)
+
+ for lang in split_linguas:
+ globs += " *-locale-%s" % lang
+
+ if globs is None:
+ return
+
+        # We need to write the list of installed packages to a file because
+        # oe-pkgdata-util reads it from a file
+ with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
+ pkgs = self.list_installed()
+ output = oe.utils.format_pkg_list(pkgs, "arch")
+ installed_pkgs.write(output)
+ installed_pkgs.flush()
+
+ cmd = ["oe-pkgdata-util",
+ "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
+ globs]
+ exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
+ if exclude:
+ cmd.extend(['--exclude=' + '|'.join(exclude.split())])
+ try:
+ bb.note("Installing complementary packages ...")
+ bb.note('Running %s' % cmd)
+ complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+ self.install(complementary_pkgs.split(), attempt_only=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not compute complementary packages list. Command "
+ "'%s' returned %d:\n%s" %
+ (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def deploy_dir_lock(self):
+ if self.deploy_dir is None:
+ raise RuntimeError("deploy_dir is not set!")
+
+ lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
+
+ self.deploy_lock = bb.utils.lockfile(lock_file_name)
+
+ def deploy_dir_unlock(self):
+ if self.deploy_lock is None:
+ return
+
+ bb.utils.unlockfile(self.deploy_lock)
+
+ self.deploy_lock = None
+
+ def construct_uris(self, uris, base_paths):
+ """
+        Construct URIs based on the pattern uri/base_path, where 'uri' and
+        'base_path' are taken from the corresponding list arguments, yielding
+        len(uris) x len(base_paths) elements in the returned list.
+ """
+ def _append(arr1, arr2, sep='/'):
+ res = []
+ narr1 = [a.rstrip(sep) for a in arr1]
+ narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
+ for a1 in narr1:
+ if arr2:
+ for a2 in narr2:
+ res.append("%s%s%s" % (a1, sep, a2))
+ else:
+ res.append(a1)
+ return res
+ return _append(uris, base_paths)
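+    # Usage sketch (values invented): construct_uris(["http://example.com/feed/"],
+    # ["all", "armv7a"]) returns ["http://example.com/feed/all",
+    # "http://example.com/feed/armv7a"].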
+
+def create_packages_dir(d, rpm_repo_dir, deploydir, taskname, filterbydependencies):
+ """
+ Go through our do_package_write_X dependencies and hardlink the packages we depend
+    upon into the repo directory. This prevents us from seeing other packages that
+    may have been built but that we don't depend upon, as well as packages for
+    architectures we don't support.
+ """
+ import errno
+
+ taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+ mytaskname = d.getVar("BB_RUNTASK")
+ pn = d.getVar("PN")
+ seendirs = set()
+ multilibs = {}
+
+ rpm_subrepo_dir = oe.path.join(rpm_repo_dir, "rpm")
+
+ bb.utils.remove(rpm_subrepo_dir, recurse=True)
+ bb.utils.mkdirhier(rpm_subrepo_dir)
+
+ # Detect bitbake -b usage
+ nodeps = d.getVar("BB_LIMITEDDEPS") or False
+ if nodeps or not filterbydependencies:
+ oe.path.symlink(deploydir, rpm_subrepo_dir, True)
+ return
+
+ start = None
+ for dep in taskdepdata:
+ data = taskdepdata[dep]
+ if data[1] == mytaskname and data[0] == pn:
+ start = dep
+ break
+ if start is None:
+ bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+ rpmdeps = set()
+ start = [start]
+ seen = set(start)
+ # Support direct dependencies (do_rootfs -> rpms)
+ # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> rpms)
+ while start:
+ next = []
+ for dep2 in start:
+ for dep in taskdepdata[dep2][3]:
+ if taskdepdata[dep][0] != pn:
+ if "do_" + taskname in dep:
+ rpmdeps.add(dep)
+ elif dep not in seen:
+ next.append(dep)
+ seen.add(dep)
+ start = next
+
+ for dep in rpmdeps:
+ c = taskdepdata[dep][0]
+ manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+ if not manifest:
+ bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+ if not os.path.exists(manifest):
+ continue
+ with open(manifest, "r") as f:
+ for l in f:
+ l = l.strip()
+ dest = l.replace(deploydir, "")
+ dest = rpm_subrepo_dir + dest
+ if l.endswith("/"):
+ if dest not in seendirs:
+ bb.utils.mkdirhier(dest)
+ seendirs.add(dest)
+ continue
+ # Try to hardlink the file, copy if that fails
+ destdir = os.path.dirname(dest)
+ if destdir not in seendirs:
+ bb.utils.mkdirhier(destdir)
+ seendirs.add(destdir)
+ try:
+ os.link(l, dest)
+ except OSError as err:
+ if err.errno == errno.EXDEV:
+ bb.utils.copyfile(l, dest)
+ else:
+ raise
+
+class RpmPM(PackageManager):
+ def __init__(self,
+ d,
+ target_rootfs,
+ target_vendor,
+ task_name='target',
+ arch_var=None,
+ os_var=None,
+ rpm_repo_workdir="oe-rootfs-repo",
+ filterbydependencies=True):
+ super(RpmPM, self).__init__(d, target_rootfs)
+ self.target_vendor = target_vendor
+ self.task_name = task_name
+ if arch_var == None:
+ self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_")
+ else:
+ self.archs = self.d.getVar(arch_var).replace("-","_")
+ if task_name == "host":
+ self.primary_arch = self.d.getVar('SDK_ARCH')
+ else:
+ self.primary_arch = self.d.getVar('MACHINE_ARCH')
+
+ self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+ create_packages_dir(self.d, self.rpm_repo_dir, d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+
+ self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
+ self.packaging_data_dirs = ['var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
+ self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
+ self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ def _configure_dnf(self):
+ # libsolv handles 'noarch' internally, we don't need to specify it explicitly
+ archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
+ # This prevents accidental matching against libsolv's built-in policies
+ if len(archs) <= 1:
+ archs = archs + ["bogusarch"]
+ confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
+ bb.utils.mkdirhier(confdir)
+ open(confdir + "arch", 'w').write(":".join(archs))
+ distro_codename = self.d.getVar('DISTRO_CODENAME')
+ open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
+
+ open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
+
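+    # Resulting file sketch (values invented): with self.archs == "noarch x86_64",
+    # etc/dnf/vars/arch would contain "x86_64:bogusarch"; "noarch" is filtered
+    # out above and "bogusarch" pads the single-entry list.
+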
+
+ def _configure_rpm(self):
+ # We need to configure rpm to use our primary package architecture as the installation architecture,
+ # and to make it compatible with other package architectures that we use.
+        # Otherwise it will refuse to proceed with package installation.
+ platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
+ rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
+ bb.utils.mkdirhier(platformconfdir)
+ open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+ open(rpmrcconfdir + "rpmrc", 'w').write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+
+ open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+ if self.d.getVar('RPM_PREFER_ELF_ARCH'):
+ open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
+ else:
+ open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
+ signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
+ pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
+ signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
+ rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
+ cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Importing GPG key failed. Command '%s' "
+ "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+
+ def create_configs(self):
+ self._configure_dnf()
+ self._configure_rpm()
+
+ def write_index(self):
+ lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
+ lf = bb.utils.lockfile(lockfilename, False)
+ RpmIndexer(self.d, self.rpm_repo_dir).write_index()
+ bb.utils.unlockfile(lf)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ from urllib.parse import urlparse
+
+ if feed_uris == "":
+ return
+
+ gpg_opts = ''
+ if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+ gpg_opts += 'repo_gpgcheck=1\n'
+ gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
+
+ if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
+ gpg_opts += 'gpgcheck=0\n'
+
+ bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
+ remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ for uri in remote_uris:
+ repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
+ if feed_archs is not None:
+ for arch in feed_archs.split():
+ repo_uri = uri + "/" + arch
+ repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/"))
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts))
+ else:
+ repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
+ repo_uri = uri
+ open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
+ "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts))
+
+ def _prepare_pkg_transaction(self):
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+
+ def install(self, pkgs, attempt_only = False):
+ if len(pkgs) == 0:
+ return
+ self._prepare_pkg_transaction()
+
+ bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
+ package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
+ exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
+
+ output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
+ (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
+ (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
+ (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
+ ["install"] +
+ pkgs)
+
+ failed_scriptlets_pkgnames = collections.OrderedDict()
+ for line in output.splitlines():
+ if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
+ failed_scriptlets_pkgnames[line.split()[-1]] = True
+
+ if len(failed_scriptlets_pkgnames) > 0:
+ failed_postinsts_warn(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+ for pkg in failed_scriptlets_pkgnames.keys():
+ self.save_rpmpostinst(pkg)
+
+ def remove(self, pkgs, with_dependencies = True):
+ if len(pkgs) == 0:
+ return
+ self._prepare_pkg_transaction()
+
+ if with_dependencies:
+ self._invoke_dnf(["remove"] + pkgs)
+ else:
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs]
+
+ try:
+ bb.note("Running %s" % ' '.join([cmd] + args + pkgs))
+ output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
+
+ def upgrade(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["upgrade"])
+
+ def autoremove(self):
+ self._prepare_pkg_transaction()
+ self._invoke_dnf(["autoremove"])
+
+ def remove_packaging_data(self):
+ self._invoke_dnf(["clean", "all"])
+ for dir in self.packaging_data_dirs:
+ bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
+
+ def backup_packaging_data(self):
+        # Save the packaging dirs for incremental rpm image generation
+ if os.path.exists(self.saved_packaging_data):
+ bb.utils.remove(self.saved_packaging_data, True)
+ for i in self.packaging_data_dirs:
+ source_dir = oe.path.join(self.target_rootfs, i)
+ target_dir = oe.path.join(self.saved_packaging_data, i)
+ shutil.copytree(source_dir, target_dir, symlinks=True)
+
+ def recovery_packaging_data(self):
+ # Move the rpmlib back
+ if os.path.exists(self.saved_packaging_data):
+ for i in self.packaging_data_dirs:
+ target_dir = oe.path.join(self.target_rootfs, i)
+ if os.path.exists(target_dir):
+ bb.utils.remove(target_dir, True)
+ source_dir = oe.path.join(self.saved_packaging_data, i)
+ shutil.copytree(source_dir,
+ target_dir,
+ symlinks=True)
+
+ def list_installed(self):
+ output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
+ print_output = False)
+ packages = {}
+ current_package = None
+ current_deps = None
+ current_state = "initial"
+ for line in output.splitlines():
+ if line.startswith("Package:"):
+ package_info = line.split(" ")[1:]
+ current_package = package_info[0]
+ package_arch = package_info[1]
+ package_version = package_info[2]
+ package_rpm = package_info[3]
+ packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
+ current_deps = []
+ elif line.startswith("Dependencies:"):
+ current_state = "dependencies"
+ elif line.startswith("Recommendations"):
+ current_state = "recommendations"
+ elif line.startswith("DependenciesEndHere:"):
+ current_state = "initial"
+ packages[current_package]["deps"] = current_deps
+ elif len(line) > 0:
+ if current_state == "dependencies":
+ current_deps.append(line)
+ elif current_state == "recommendations":
+ current_deps.append("%s [REC]" % line)
+
+ return packages
+
+ def update(self):
+ self._invoke_dnf(["makecache", "--refresh"])
+
+ def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ):
+ os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
+
+ dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
+ standard_dnf_args = (["-v", "--rpmverbosity=debug"] if self.d.getVar('ROOTFS_RPM_DEBUG') else []) + ["-y",
+ "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
+ "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
+ "--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
+ "--installroot=%s" % (self.target_rootfs),
+ "--setopt=logdir=%s" % (self.d.getVar('T'))
+ ]
+ cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+ bb.note('Running %s' % ' '.join(cmd))
+ try:
+ output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
+ if print_output:
+ bb.note(output)
+ return output
+ except subprocess.CalledProcessError as e:
+ if print_output:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ else:
+ (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+ "'%s' returned %d:" % (' '.join(cmd), e.returncode))
+ return e.output.decode("utf-8")
+
+ def dump_install_solution(self, pkgs):
+ open(self.solution_manifest, 'w').write(" ".join(pkgs))
+ return pkgs
+
+ def load_old_install_solution(self):
+ if not os.path.exists(self.solution_manifest):
+ return []
+
+ return open(self.solution_manifest, 'r').read().split()
+
+ def _script_num_prefix(self, path):
+ files = os.listdir(path)
+ numbers = set()
+ numbers.add(99)
+ for f in files:
+ numbers.add(int(f.split("-")[0]))
+ return max(numbers) + 1
+
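+    # Example (values invented): with existing scripts "100-pkga" and "101-pkgb"
+    # in path, the next prefix is 102; for an empty directory the baseline of 99
+    # yields 100.
+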
+ def save_rpmpostinst(self, pkg):
+ bb.note("Saving postinstall script of %s" % (pkg))
+ cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+ args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
+
+ try:
+ output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Could not invoke rpm. Command "
+ "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
+
+ # may need to prepend #!/bin/sh to output
+
+ target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
+ bb.utils.mkdirhier(target_path)
+ num = self._script_num_prefix(target_path)
+ saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
+ open(saved_script_name, 'w').write(output)
+ os.chmod(saved_script_name, 0o755)
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+ bb.utils.mkdirhier(rpm_postinsts_dir)
+
+ # Save the package postinstalls in /etc/rpm-postinsts
+ for pkg in registered_pkgs.split():
+ self.save_rpmpostinst(pkg)
+
+ def extract(self, pkg):
+ output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+ pkg_name = output.splitlines()[-1]
+ if not pkg_name.endswith(".rpm"):
+ bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
+ pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
+
+ cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
+ rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
+
+ if not os.path.isfile(pkg_path):
+            bb.fatal("Unable to extract package for '%s'. "
+                     "File %s doesn't exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+
+ try:
+ cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd)
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
+
+class OpkgDpkgPM(PackageManager):
+ def __init__(self, d, target_rootfs):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+ super(OpkgDpkgPM, self).__init__(d, target_rootfs)
+
+ def package_info(self, pkg, cmd):
+ """
+ Returns a dictionary with the package info.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to list available packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+ return opkg_query(output)
+
+ def extract(self, pkg, pkg_info):
+ """
+        Returns the path to a tmpdir where the contents of a package reside.
+
+        Deleting the tmpdir is the responsibility of the caller.
+
+ This method extracts the common parts for Opkg and Dpkg
+ """
+
+ ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
+ tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
+ pkg_path = pkg_info[pkg]["filepath"]
+
+ if not os.path.isfile(pkg_path):
+            bb.fatal("Unable to extract package for '%s'. "
+                     "File %s doesn't exist" % (pkg, pkg_path))
+
+ tmp_dir = tempfile.mkdtemp()
+ current_dir = os.getcwd()
+ os.chdir(tmp_dir)
+ if self.d.getVar('IMAGE_PKGTYPE') == 'deb':
+ data_tar = 'data.tar.xz'
+ else:
+ data_tar = 'data.tar.gz'
+
+ try:
+ cmd = [ar_cmd, 'x', pkg_path]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ cmd = [tar_cmd, 'xf', data_tar]
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+ except OSError as e:
+ bb.utils.remove(tmp_dir, recurse=True)
+ bb.fatal("Unable to extract %s package. Command '%s' "
+ "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
+
+ bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
+ bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
+ bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz"))
+ os.chdir(current_dir)
+
+ return tmp_dir
+
+ def _handle_intercept_failure(self, registered_pkgs):
+ self.mark_packages("unpacked", registered_pkgs.split())
+
+class OpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
+ super(OpkgPM, self).__init__(d, target_rootfs)
+
+ self.config_file = config_file
+ self.pkg_archs = archs
+ self.task_name = task_name
+
+ self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK")
+ self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
+ self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+ self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
+ self.opkg_args += self.d.getVar("OPKG_ARGS")
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+
+ self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
+
+ bb.utils.mkdirhier(self.opkg_dir)
+
+ self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
+ if not os.path.exists(self.d.expand('${T}/saved')):
+ bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+ self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
+ if self.from_feeds:
+ self._create_custom_config()
+ else:
+ self._create_config()
+
+ self.indexer = OpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+        This function will change a package's status in the /var/lib/opkg/status file.
+        If 'packages' is None then the status_tag will be applied to all
+        packages.
+ """
+ status_file = os.path.join(self.opkg_dir, "status")
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ os.rename(status_file + ".tmp", status_file)
+
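+    # Illustrative transformation (entry invented): given a status entry
+    # "Package: foo\nStatus: install ok unpacked", calling
+    # mark_packages("installed", ["foo"]) rewrites the trailing word of the
+    # Status line, giving "Status: install ok installed".
+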
+ def _create_custom_config(self):
+ bb.note("Building from feeds activated!")
+
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
+ feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+
+ if feed_match is not None:
+ feed_name = feed_match.group(1)
+ feed_uri = feed_match.group(2)
+
+ bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
+
+ config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
+
+ """
+        Allow use of the package deploy directory contents as a quick devel-testing
+ feed. This creates individual feed configs for each arch subdir of those
+ specified as compatible for the current machine.
+ NOTE: Development-helper feature, NOT a full-fledged feed.
+ """
+ if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
+ for arch in self.pkg_archs.split():
+ cfg_file_name = os.path.join(self.target_rootfs,
+ self.d.getVar("sysconfdir"),
+ "opkg",
+ "local-%s-feed.conf" % arch)
+
+ with open(cfg_file_name, "w+") as cfg_file:
+ cfg_file.write("src/gz local-%s %s/%s" %
+ (arch,
+ self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
+ arch))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+                config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+                config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+                config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+
+ def _create_config(self):
+ with open(self.config_file, "w+") as config_file:
+ priority = 1
+ for arch in self.pkg_archs.split():
+ config_file.write("arch %s %d\n" % (arch, priority))
+ priority += 5
+
+ config_file.write("src oe file:%s\n" % self.deploy_dir)
+
+ for arch in self.pkg_archs.split():
+ pkgs_dir = os.path.join(self.deploy_dir, arch)
+ if os.path.isdir(pkgs_dir):
+ config_file.write("src oe-%s file:%s\n" %
+ (arch, pkgs_dir))
+
+ if self.d.getVar('OPKGLIBDIR') != '/var/lib':
+ # There is no command line option for this anymore, we need to add
+ # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
+ # the default value of "/var/lib" as defined in opkg:
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info"
+ # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status"
+ config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+ config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+ config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+ rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
+ % self.target_rootfs)
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+ archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
+
+ with open(rootfs_config, "w+") as config_file:
+ uri_iterator = 0
+ for uri in feed_uris:
+ if archs:
+ for arch in archs:
+ if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
+ continue
+ bb.note('Adding opkg feed url-%s-%d (%s)' %
+ (arch, uri_iterator, uri))
+ config_file.write("src/gz uri-%s-%d %s/%s\n" %
+ (arch, uri_iterator, uri, arch))
+ else:
+ bb.note('Adding opkg feed url-%d (%s)' %
+ (uri_iterator, uri))
+ config_file.write("src/gz uri-%d %s\n" %
+ (uri_iterator, uri))
+
+ uri_iterator += 1
+
+ def update(self):
+ self.deploy_dir_lock()
+
+ cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ self.deploy_dir_unlock()
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if not pkgs:
+ return
+
+ cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ failed_pkgs = []
+ for line in output.split('\n'):
+ if line.endswith("configuration required on target."):
+ bb.warn(line)
+ failed_pkgs.append(line.split(".")[0])
+ if failed_pkgs:
+ failed_postinsts_warn(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+ except subprocess.CalledProcessError as e:
+ (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
+ "Command '%s' returned %d:\n%s" %
+ (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def remove(self, pkgs, with_dependencies=True):
+ if with_dependencies:
+ cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+ else:
+ cmd = "%s %s --force-depends remove %s" % \
+ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+ try:
+ bb.note(cmd)
+ output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def remove_packaging_data(self):
+ bb.utils.remove(self.opkg_dir, True)
+        # Recreate the directory; it's needed by the PM lock
+ bb.utils.mkdirhier(self.opkg_dir)
+
+ def remove_lists(self):
+ if not self.from_feeds:
+ bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True)
+
+ def list_installed(self):
+ return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
+
+ def handle_bad_recommendations(self):
+ bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS") or ""
+ if bad_recommendations.strip() == "":
+ return
+
+ status_file = os.path.join(self.opkg_dir, "status")
+
+        # If the status file already exists, the bad recommendations have
+        # already been handled
+ if os.path.exists(status_file):
+ return
+
+ cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
+
+ with open(status_file, "w+") as status:
+ for pkg in bad_recommendations.split():
+ pkg_info = cmd + pkg
+
+ try:
+ output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot get package info. Command '%s' "
+ "returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
+
+ if output == "":
+ bb.note("Ignored bad recommendation: '%s' is "
+ "not a package" % pkg)
+ continue
+
+ for line in output.split('\n'):
+ if line.startswith("Status:"):
+ status.write("Status: deinstall hold not-installed\n")
+ else:
+ status.write(line + "\n")
+
+ # Append a blank line after each package entry to ensure that it
+ # is separated from the following entry
+ status.write("\n")
+
+ def dummy_install(self, pkgs):
+ """
+        This function dummy-installs pkgs and returns the output log.
+ """
+ if len(pkgs) == 0:
+ return
+
+        # Create a temp dir as the opkg root for the dummy installation
+ temp_rootfs = self.d.expand('${T}/opkg')
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ if opkg_lib_dir[0] == "/":
+ opkg_lib_dir = opkg_lib_dir[1:]
+ temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg')
+ bb.utils.mkdirhier(temp_opkg_dir)
+
+ opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
+ opkg_args += self.d.getVar("OPKG_ARGS")
+
+ cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # Dummy installation
+ cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
+ opkg_args,
+ ' '.join(pkgs))
+ try:
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to dummy install packages. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ bb.utils.remove(temp_rootfs, True)
+
+ return output
+
+ def backup_packaging_data(self):
+        # Save the opkglib for incremental ipk image generation
+ if os.path.exists(self.saved_opkg_dir):
+ bb.utils.remove(self.saved_opkg_dir, True)
+ shutil.copytree(self.opkg_dir,
+ self.saved_opkg_dir,
+ symlinks=True)
+
+ def recover_packaging_data(self):
+ # Move the opkglib back
+ if os.path.exists(self.saved_opkg_dir):
+ if os.path.exists(self.opkg_dir):
+ bb.utils.remove(self.opkg_dir, True)
+
+ bb.note('Recover packaging data')
+ shutil.copytree(self.saved_opkg_dir,
+ self.opkg_dir,
+ symlinks=True)
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
+ pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["arch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+ """
+        Returns the path to a tmpdir where the contents of a package reside.
+
+        Deleting the tmpdir is the responsibility of the caller.
+ """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
+
+ return tmp_dir
+
+class DpkgPM(OpkgDpkgPM):
+ def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
+ super(DpkgPM, self).__init__(d, target_rootfs)
+ self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
+ if apt_conf_dir is None:
+ self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
+ else:
+ self.apt_conf_dir = apt_conf_dir
+ self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+ self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
+ self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
+
+ self.apt_args = d.getVar("APT_ARGS")
+
+ self.all_arch_list = archs.split()
+ all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
+ self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
+
+ self._create_configs(archs, base_archs)
+
+ self.indexer = DpkgIndexer(self.d, self.deploy_dir)
+
+ def mark_packages(self, status_tag, packages=None):
+ """
+        This function will change a package's status in the /var/lib/dpkg/status file.
+        If 'packages' is None then the status_tag will be applied to all
+        packages.
+ """
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+
+ with open(status_file, "r") as sf:
+ with open(status_file + ".tmp", "w+") as tmp_sf:
+ if packages is None:
+ tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+ r"Package: \1\n\2Status: \3%s" % status_tag,
+ sf.read()))
+ else:
+ if type(packages).__name__ != "list":
+ raise TypeError("'packages' should be a list object")
+
+ status = sf.read()
+ for pkg in packages:
+ status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+ r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+ status)
+
+ tmp_sf.write(status)
+
+ os.rename(status_file + ".tmp", status_file)
+
+ def run_pre_post_installs(self, package_name=None):
+ """
+ Run the pre/post installs for package "package_name". If package_name is
+ None, then run all pre/post install scriptlets.
+ """
+ info_dir = self.target_rootfs + "/var/lib/dpkg/info"
+ ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
+ control_scripts = [
+ ControlScript(".preinst", "Preinstall", "install"),
+ ControlScript(".postinst", "Postinstall", "configure")]
+ status_file = self.target_rootfs + "/var/lib/dpkg/status"
+ installed_pkgs = []
+
+ with open(status_file, "r") as status:
+ for line in status.read().split('\n'):
+ m = re.match("^Package: (.*)", line)
+ if m is not None:
+ installed_pkgs.append(m.group(1))
+
+        if package_name is not None and package_name not in installed_pkgs:
+ return
+
+ os.environ['D'] = self.target_rootfs
+ os.environ['OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+ os.environ['INTERCEPT_DIR'] = self.intercepts_dir
+ os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+ failed_pkgs = []
+ for pkg_name in installed_pkgs:
+ for control_script in control_scripts:
+ p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
+ if os.path.exists(p_full):
+ try:
+ bb.note("Executing %s for package: %s ..." %
+ (control_script.name.lower(), pkg_name))
+ output = subprocess.check_output([p_full, control_script.argument],
+ stderr=subprocess.STDOUT).decode("utf-8")
+ bb.note(output)
+ except subprocess.CalledProcessError as e:
+ bb.warn("%s for package %s failed with %d:\n%s" %
+ (control_script.name, pkg_name, e.returncode,
+ e.output.decode("utf-8")))
+ failed_postinsts_warn([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+ failed_pkgs.append(pkg_name)
+ break
+
+        if failed_pkgs:
+ self.mark_packages("unpacked", failed_pkgs)
+
+ def update(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ self.deploy_dir_lock()
+
+ cmd = "%s update" % self.apt_get_cmd
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to update the package index files. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ self.deploy_dir_unlock()
+
+ def install(self, pkgs, attempt_only=False):
+ if attempt_only and len(pkgs) == 0:
+ return
+
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
+ (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
+
+ try:
+ bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+            notify = bb.warn if attempt_only else bb.fatal
+            notify("Unable to install packages. "
+                   "Command '%s' returned %d:\n%s" %
+                   (cmd, e.returncode, e.output.decode("utf-8")))
+
+ # rename *.dpkg-new files/dirs
+ for root, dirs, files in os.walk(self.target_rootfs):
+ for dir in dirs:
+                new_dir = re.sub(r"\.dpkg-new", "", dir)
+ if dir != new_dir:
+ os.rename(os.path.join(root, dir),
+ os.path.join(root, new_dir))
+
+ for file in files:
+                new_file = re.sub(r"\.dpkg-new", "", file)
+ if file != new_file:
+ os.rename(os.path.join(root, file),
+ os.path.join(root, new_file))
+
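+    # Usage sketch (hypothetical values, not from this file):
+    #   pm = DpkgPM(d, rootfs, archs="amd64 all", base_archs="amd64")
+    #   pm.update()
+    #   pm.install(["busybox"])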
+
+ def remove(self, pkgs, with_dependencies=True):
+ if with_dependencies:
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+ cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
+ else:
+ cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
+ " -P --force-depends %s" % \
+ (bb.utils.which(os.getenv('PATH'), "dpkg"),
+ self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Unable to remove packages. Command '%s' "
+ "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8")))
+
+ def write_index(self):
+ self.deploy_dir_lock()
+
+ result = self.indexer.write_index()
+
+ self.deploy_dir_unlock()
+
+ if result is not None:
+ bb.fatal(result)
+
+ def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+ if feed_uris == "":
+ return
+
+        sources_conf = os.path.join(self.target_rootfs,
+                                    "etc/apt/sources.list")
+ arch_list = []
+
+ if feed_archs is None:
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+ else:
+ arch_list = feed_archs.split()
+
+ feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+
+ with open(sources_conf, "w+") as sources_file:
+ for uri in feed_uris:
+ if arch_list:
+ for arch in arch_list:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb %s/%s ./\n" %
+ (uri, arch))
+ else:
+ bb.note('Adding dpkg channel at (%s)' % uri)
+ sources_file.write("deb %s ./\n" % uri)
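+
+    # e.g. feed_uris="http://feed.example/deb" with feed_archs="amd64"
+    # (illustrative URI) produces: deb http://feed.example/deb/amd64 ./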
+
+ def _create_configs(self, archs, base_archs):
+ base_archs = re.sub("_", "-", base_archs)
+
+ if os.path.exists(self.apt_conf_dir):
+ bb.utils.remove(self.apt_conf_dir, True)
+
+ bb.utils.mkdirhier(self.apt_conf_dir)
+ bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
+ bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/")
+
+ arch_list = []
+ for arch in self.all_arch_list:
+ if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+ continue
+ arch_list.append(arch)
+
+ with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
+ priority = 801
+ for arch in arch_list:
+ prefs_file.write(
+ "Package: *\n"
+ "Pin: release l=%s\n"
+ "Pin-Priority: %d\n\n" % (arch, priority))
+
+ priority += 5
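+
+            # Each loop iteration above emits a stanza like (illustrative):
+            #   Package: *
+            #   Pin: release l=amd64
+            #   Pin-Priority: 801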
+
+ pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
+ for pkg in pkg_exclude.split():
+ prefs_file.write(
+ "Package: %s\n"
+ "Pin: release *\n"
+ "Pin-Priority: -1\n\n" % pkg)
+
+ arch_list.reverse()
+
+ with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
+ for arch in arch_list:
+ sources_file.write("deb file:%s/ ./\n" %
+ os.path.join(self.deploy_dir, arch))
+
+ base_arch_list = base_archs.split()
+        multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
+ for variant in multilib_variants.split():
+ localdata = bb.data.createCopy(self.d)
+ variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
+ orig_arch = localdata.getVar("DPKG_ARCH")
+ localdata.setVar("DEFAULTTUNE", variant_tune)
+ variant_arch = localdata.getVar("DPKG_ARCH")
+ if variant_arch not in base_arch_list:
+ base_arch_list.append(variant_arch)
+
+ with open(self.apt_conf_file, "w+") as apt_conf:
+ with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
+ for line in apt_conf_sample.read().split("\n"):
+                    match_arch = re.match(r" Architecture \".*\";$", line)
+                    architectures = ""
+                    if match_arch:
+                        for base_arch in base_arch_list:
+                            architectures += "\"%s\";" % base_arch
+                        apt_conf.write(" Architectures {%s};\n" % architectures)
+ apt_conf.write(" Architecture \"%s\";\n" % base_archs)
+ else:
+ line = re.sub("#ROOTFS#", self.target_rootfs, line)
+ line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ apt_conf.write(line + "\n")
+
+ target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
+
+ bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
+
+ if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
+ open(os.path.join(target_dpkg_dir, "status"), "w+").close()
+ if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
+ open(os.path.join(target_dpkg_dir, "available"), "w+").close()
+
+ def remove_packaging_data(self):
+ bb.utils.remove(os.path.join(self.target_rootfs,
+ self.d.getVar('opkglibdir')), True)
+ bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
+
+ def fix_broken_dependencies(self):
+ os.environ['APT_CONFIG'] = self.apt_conf_file
+
+ cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
+
+ try:
+ subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Cannot fix broken dependencies. Command '%s' "
+ "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+
+ def list_installed(self):
+ return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
+
+ def package_info(self, pkg):
+ """
+ Returns a dictionary with the package info.
+ """
+ cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
+ pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
+
+ pkg_arch = pkg_info[pkg]["pkgarch"]
+ pkg_filename = pkg_info[pkg]["filename"]
+ pkg_info[pkg]["filepath"] = \
+ os.path.join(self.deploy_dir, pkg_arch, pkg_filename)
+
+ return pkg_info
+
+ def extract(self, pkg):
+        """
+        Returns the path to a tmpdir containing the extracted contents of a
+        package.
+
+        Deleting the tmpdir is the responsibility of the caller.
+        """
+ pkg_info = self.package_info(pkg)
+ if not pkg_info:
+ bb.fatal("Unable to get information for package '%s' while "
+ "trying to extract the package." % pkg)
+
+ tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info)
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
+
+ return tmp_dir
+
+def generate_index_files(d):
+ classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
+
+ indexer_map = {
+ "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
+ "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
+ "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
+ }
+
+ result = None
+
+ for pkg_class in classes:
+        if pkg_class not in indexer_map:
+ continue
+
+ if os.path.exists(indexer_map[pkg_class][1]):
+ result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
+
+ if result is not None:
+ bb.fatal(result)
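+
+# Usage sketch: typically called with the BitBake datastore from a task,
+# e.g. generate_index_files(d) after packages have been deployed.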
diff --git a/poky/meta/lib/oe/packagedata.py b/poky/meta/lib/oe/packagedata.py
new file mode 100644
index 000000000..32e5c82a9
--- /dev/null
+++ b/poky/meta/lib/oe/packagedata.py
@@ -0,0 +1,95 @@
+import codecs
+import os
+
+# 'bb' is normally provided by the BitBake environment; it is imported
+# explicitly here so the bb.warn() call below also works standalone.
+import bb
+
+def packaged(pkg, d):
+ return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
+
+def read_pkgdatafile(fn):
+ pkgdata = {}
+
+    def decode(str_value):
+        c = codecs.getdecoder("unicode_escape")
+        return c(str_value)[0]
+
+ if os.access(fn, os.R_OK):
+ import re
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        r = re.compile(r"([^:]+):\s*(.*)")
+ for l in lines:
+ m = r.match(l)
+ if m:
+ pkgdata[m.group(1)] = decode(m.group(2))
+
+ return pkgdata
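+
+# A pkgdata file is a flat "Key: value" store, e.g. (illustrative):
+#   PN: busybox
+#   PACKAGES: busybox busybox-dev
+# which read_pkgdatafile() returns as a dict.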
+
+def get_subpkgedata_fn(pkg, d):
+ return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
+
+def has_subpkgdata(pkg, d):
+ return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
+
+def read_subpkgdata(pkg, d):
+ return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
+
+def has_pkgdata(pn, d):
+ fn = d.expand('${PKGDATA_DIR}/%s' % pn)
+ return os.access(fn, os.R_OK)
+
+def read_pkgdata(pn, d):
+ fn = d.expand('${PKGDATA_DIR}/%s' % pn)
+ return read_pkgdatafile(fn)
+
+#
+# Collapse FOO_pkg variables into FOO
+#
+def read_subpkgdata_dict(pkg, d):
+ ret = {}
+ subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
+ for var in subd:
+ newvar = var.replace("_" + pkg, "")
+ if newvar == var and var + "_" + pkg in subd:
+ continue
+ ret[newvar] = subd[var]
+ return ret
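+
+# e.g. for pkg="busybox", {"FILES_busybox": "/bin/busybox"} collapses to
+# {"FILES": "/bin/busybox"}.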
+
+def _pkgmap(d):
+ """Return a dictionary mapping package to recipe name."""
+
+ pkgdatadir = d.getVar("PKGDATA_DIR")
+
+ pkgmap = {}
+ try:
+ files = os.listdir(pkgdatadir)
+ except OSError:
+ bb.warn("No files in %s?" % pkgdatadir)
+ files = []
+
+ for pn in [f for f in files if not os.path.isdir(os.path.join(pkgdatadir, f))]:
+ try:
+ pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
+ except OSError:
+ continue
+
+ packages = pkgdata.get("PACKAGES") or ""
+ for pkg in packages.split():
+ pkgmap[pkg] = pn
+
+ return pkgmap
+
+def pkgmap(d):
+ """Return a dictionary mapping package to recipe name.
+ Cache the mapping in the metadata"""
+
+ pkgmap_data = d.getVar("__pkgmap_data", False)
+ if pkgmap_data is None:
+ pkgmap_data = _pkgmap(d)
+ d.setVar("__pkgmap_data", pkgmap_data)
+
+ return pkgmap_data
+
+def recipename(pkg, d):
+ """Return the recipe name for the given binary package name."""
+
+ return pkgmap(d).get(pkg)
diff --git a/poky/meta/lib/oe/packagegroup.py b/poky/meta/lib/oe/packagegroup.py
new file mode 100644
index 000000000..4bc5d3e4b
--- /dev/null
+++ b/poky/meta/lib/oe/packagegroup.py
@@ -0,0 +1,36 @@
+import itertools
+
+def is_optional(feature, d):
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
+ if packages:
+ return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
+ else:
+ return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
+
+def packages(features, d):
+ for feature in features:
+ packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
+ if not packages:
+ packages = d.getVar("PACKAGE_GROUP_%s" % feature)
+ for pkg in (packages or "").split():
+ yield pkg
+
+def required_packages(features, d):
+ req = [feature for feature in features if not is_optional(feature, d)]
+ return packages(req, d)
+
+def optional_packages(features, d):
+ opt = [feature for feature in features if is_optional(feature, d)]
+ return packages(opt, d)
+
+def active_packages(features, d):
+ return itertools.chain(required_packages(features, d),
+ optional_packages(features, d))
+
+def active_recipes(features, d):
+ import oe.packagedata
+
+ for pkg in active_packages(features, d):
+ recipe = oe.packagedata.recipename(pkg, d)
+ if recipe:
+ yield recipe
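+
+# Usage sketch (hypothetical feature name):
+#   for recipe in active_recipes(["ssh-server-openssh"], d):
+#       bb.note(recipe)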
diff --git a/poky/meta/lib/oe/patch.py b/poky/meta/lib/oe/patch.py
new file mode 100644
index 000000000..af7aa5235
--- /dev/null
+++ b/poky/meta/lib/oe/patch.py
@@ -0,0 +1,895 @@
+import os
+
+import oe.path
+import oe.types
+import oe.utils
+
+# 'bb' (bb.utils, bb.fetch2, bb.note, ...) is provided by the BitBake
+# environment in which this module is loaded.
+
+class NotFoundError(bb.BBHandledException):
+ def __init__(self, path):
+ self.path = path
+
+ def __str__(self):
+ return "Error: %s not found." % self.path
+
+class CmdError(bb.BBHandledException):
+ def __init__(self, command, exitstatus, output):
+ self.command = command
+ self.status = exitstatus
+ self.output = output
+
+ def __str__(self):
+        return "Command Error: '%s' exited with %d. Output:\n%s" % \
+               (self.command, self.status, self.output)
+
+
+def runcmd(args, dir = None):
+ import pipes
+
+ if dir:
+ olddir = os.path.abspath(os.curdir)
+ if not os.path.exists(dir):
+ raise NotFoundError(dir)
+ os.chdir(dir)
+ # print("cwd: %s -> %s" % (olddir, dir))
+
+ try:
+ args = [ pipes.quote(str(arg)) for arg in args ]
+ cmd = " ".join(args)
+ # print("cmd: %s" % cmd)
+ (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+ if exitstatus != 0:
+ raise CmdError(cmd, exitstatus >> 8, output)
+ if " fuzz " in output:
+ bb.warn("""
+Some of the context lines in patches were ignored. This can lead to incorrectly applied patches.
+The context lines in the patches can be updated with devtool:
+
+ devtool modify <recipe>
+ devtool finish --force-patch-refresh <recipe> <layer_path>
+
+Then the updated patches and the source tree (in devtool's workspace)
+should be reviewed to make sure the patches apply in the correct place
+and don't introduce duplicate lines (which can, and does happen
+when some of the context is ignored). Further information:
+http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
+https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
+Details:
+{}""".format(output))
+ return output
+
+ finally:
+ if dir:
+ os.chdir(olddir)
+
+class PatchError(Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return "Patch Error: %s" % self.msg
+
+class PatchSet(object):
+ defaults = {
+ "strippath": 1
+ }
+
+ def __init__(self, dir, d):
+ self.dir = dir
+ self.d = d
+ self.patches = []
+ self._current = None
+
+ def current(self):
+ return self._current
+
+ def Clean(self):
+ """
+ Clean out the patch set. Generally includes unapplying all
+ patches and wiping out all associated metadata.
+ """
+ raise NotImplementedError()
+
+ def Import(self, patch, force):
+ if not patch.get("file"):
+ if not patch.get("remote"):
+ raise PatchError("Patch file must be specified in patch import.")
+ else:
+ patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
+
+ for param in PatchSet.defaults:
+ if not patch.get(param):
+ patch[param] = PatchSet.defaults[param]
+
+ if patch.get("remote"):
+ patch["file"] = self.d.expand(bb.fetch2.localpath(patch["remote"], self.d))
+
+ patch["filemd5"] = bb.utils.md5_file(patch["file"])
+
+ def Push(self, force):
+ raise NotImplementedError()
+
+ def Pop(self, force):
+ raise NotImplementedError()
+
+ def Refresh(self, remote = None, all = None):
+ raise NotImplementedError()
+
+ @staticmethod
+ def getPatchedFiles(patchfile, striplevel, srcdir=None):
+ """
+ Read a patch file and determine which files it will modify.
+ Params:
+ patchfile: the patch file to read
+ striplevel: the strip level at which the patch is going to be applied
+ srcdir: optional path to join onto the patched file paths
+ Returns:
+ A list of tuples of file path and change mode ('A' for add,
+ 'D' for delete or 'M' for modify)
+ """
+
+ def patchedpath(patchline):
+ filepth = patchline.split()[1]
+ if filepth.endswith('/dev/null'):
+ return '/dev/null'
+ filesplit = filepth.split(os.sep)
+ if striplevel > len(filesplit):
+ bb.error('Patch %s has invalid strip level %d' % (patchfile, striplevel))
+ return None
+ return os.sep.join(filesplit[striplevel:])
+
+ for encoding in ['utf-8', 'latin-1']:
+ try:
+ copiedmode = False
+ filelist = []
+ with open(patchfile) as f:
+ for line in f:
+ if line.startswith('--- '):
+ patchpth = patchedpath(line)
+ if not patchpth:
+ break
+ if copiedmode:
+ addedfile = patchpth
+ else:
+ removedfile = patchpth
+ elif line.startswith('+++ '):
+ addedfile = patchedpath(line)
+ if not addedfile:
+ break
+ elif line.startswith('*** '):
+ copiedmode = True
+ removedfile = patchedpath(line)
+ if not removedfile:
+ break
+ else:
+ removedfile = None
+ addedfile = None
+
+ if addedfile and removedfile:
+ if removedfile == '/dev/null':
+ mode = 'A'
+ elif addedfile == '/dev/null':
+ mode = 'D'
+ else:
+ mode = 'M'
+ if srcdir:
+ fullpath = os.path.abspath(os.path.join(srcdir, addedfile))
+ else:
+ fullpath = addedfile
+ filelist.append((fullpath, mode))
+ except UnicodeDecodeError:
+ continue
+ break
+ else:
+ raise PatchError('Unable to decode %s' % patchfile)
+
+ return filelist
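+
+    # Example: a unified diff containing
+    #     --- a/src/main.c
+    #     +++ b/src/main.c
+    # yields [("src/main.c", 'M')] at striplevel=1.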
+
+
+class PatchTree(PatchSet):
+ def __init__(self, dir, d):
+ PatchSet.__init__(self, dir, d)
+ self.patchdir = os.path.join(self.dir, 'patches')
+ self.seriespath = os.path.join(self.dir, 'patches', 'series')
+ bb.utils.mkdirhier(self.patchdir)
+
+ def _appendPatchFile(self, patch, strippath):
+ with open(self.seriespath, 'a') as f:
+            f.write(os.path.basename(patch) + "," + str(strippath) + "\n")
+ shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ def _removePatch(self, p):
+ patch = {}
+ patch['file'] = p.split(",")[0]
+ patch['strippath'] = p.split(",")[1]
+ self._applypatch(patch, False, True)
+
+ def _removePatchFile(self, all = False):
+ if not os.path.exists(self.seriespath):
+ return
+ with open(self.seriespath, 'r+') as f:
+ patches = f.readlines()
+ if all:
+ for p in reversed(patches):
+ self._removePatch(os.path.join(self.patchdir, p.strip()))
+ patches = []
+ else:
+ self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
+ patches.pop()
+ with open(self.seriespath, 'w') as f:
+ for p in patches:
+ f.write(p)
+
+ def Import(self, patch, force = None):
+ """"""
+ PatchSet.Import(self, patch, force)
+
+ if self._current is not None:
+ i = self._current + 1
+ else:
+ i = 0
+ self.patches.insert(i, patch)
+
+ def _applypatch(self, patch, force = False, reverse = False, run = True):
+ shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
+ if reverse:
+ shellcmd.append('-R')
+
+ if not run:
+            return ["sh", "-c", " ".join(shellcmd)]
+
+ if not force:
+ shellcmd.append('--dry-run')
+
+ try:
+ output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ if force:
+ return
+
+            shellcmd.pop()
+ output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ except CmdError as err:
+ raise bb.BBHandledException("Applying '%s' failed:\n%s" %
+ (os.path.basename(patch['file']), err.output))
+
+ if not reverse:
+ self._appendPatchFile(patch['file'], patch['strippath'])
+
+ return output
+
+ def Push(self, force = False, all = False, run = True):
+ bb.note("self._current is %s" % self._current)
+ bb.note("patches is %s" % self.patches)
+        if all:
+            for i in self.patches:
+                bb.note("applying patch %s" % i)
+                self._applypatch(i, force)
+                self._current = self.patches.index(i)
+ else:
+ if self._current is not None:
+ next = self._current + 1
+ else:
+ next = 0
+
+ bb.note("applying patch %s" % self.patches[next])
+ ret = self._applypatch(self.patches[next], force)
+
+ self._current = next
+ return ret
+
+ def Pop(self, force = None, all = None):
+ if all:
+ self._removePatchFile(True)
+ self._current = None
+ else:
+ self._removePatchFile(False)
+
+ if self._current == 0:
+ self._current = None
+
+ if self._current is not None:
+ self._current = self._current - 1
+
+ def Clean(self):
+ """"""
+ self.Pop(all=True)
+
+class GitApplyTree(PatchTree):
+ patch_line_prefix = '%% original patch'
+ ignore_commit_prefix = '%% ignore'
+
+ def __init__(self, dir, d):
+ PatchTree.__init__(self, dir, d)
+ self.commituser = d.getVar('PATCH_GIT_USER_NAME')
+ self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
+
+ @staticmethod
+ def extractPatchHeader(patchfile):
+ """
+ Extract just the header lines from the top of a patch file
+ """
+ for encoding in ['utf-8', 'latin-1']:
+ lines = []
+ try:
+ with open(patchfile, 'r', encoding=encoding) as f:
+ for line in f:
+ if line.startswith('Index: ') or line.startswith('diff -') or line.startswith('---'):
+ break
+ lines.append(line)
+ except UnicodeDecodeError:
+ continue
+ break
+ else:
+ raise PatchError('Unable to find a character encoding to decode %s' % patchfile)
+ return lines
+
+ @staticmethod
+ def decodeAuthor(line):
+ from email.header import decode_header
+ authorval = line.split(':', 1)[1].strip().replace('"', '')
+ result = decode_header(authorval)[0][0]
+ if hasattr(result, 'decode'):
+ result = result.decode('utf-8')
+ return result
+
+ @staticmethod
+ def interpretPatchHeader(headerlines):
+ import re
+        author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
+        from_commit_re = re.compile(r'^From [a-z0-9]{40} .*')
+ outlines = []
+ author = None
+ date = None
+ subject = None
+ for line in headerlines:
+ if line.startswith('Subject: '):
+ subject = line.split(':', 1)[1]
+ # Remove any [PATCH][oe-core] etc.
+ subject = re.sub(r'\[.+?\]\s*', '', subject)
+ continue
+ elif line.startswith('From: ') or line.startswith('Author: '):
+ authorval = GitApplyTree.decodeAuthor(line)
+ # git is fussy about author formatting i.e. it must be Name <email@domain>
+ if author_re.match(authorval):
+ author = authorval
+ continue
+ elif line.startswith('Date: '):
+ if date is None:
+ dateval = line.split(':', 1)[1].strip()
+ # Very crude check for date format, since git will blow up if it's not in the right
+ # format. Without e.g. a python-dateutils dependency we can't do a whole lot more
+ if len(dateval) > 12:
+ date = dateval
+ continue
+ elif not author and line.lower().startswith('signed-off-by: '):
+ authorval = GitApplyTree.decodeAuthor(line)
+ # git is fussy about author formatting i.e. it must be Name <email@domain>
+ if author_re.match(authorval):
+ author = authorval
+ elif from_commit_re.match(line):
+ # We don't want the From <commit> line - if it's present it will break rebasing
+ continue
+ outlines.append(line)
+
+ if not subject:
+ firstline = None
+ for line in headerlines:
+ line = line.strip()
+ if firstline:
+ if line:
+ # Second line is not blank, the first line probably isn't usable
+ firstline = None
+ break
+ elif line:
+ firstline = line
+ if firstline and not firstline.startswith(('#', 'Index:', 'Upstream-Status:')) and len(firstline) < 100:
+ subject = firstline
+
+ return outlines, author, date, subject
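+
+    # Example (illustrative): a header with
+    #   From: Jane Doe <jane@example.com>
+    #   Subject: [PATCH v2] fix overflow
+    # yields that author and the subject "fix overflow" (bracketed tags stripped).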
+
+ @staticmethod
+ def gitCommandUserOptions(cmd, commituser=None, commitemail=None, d=None):
+ if d:
+ commituser = d.getVar('PATCH_GIT_USER_NAME')
+ commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
+ if commituser:
+ cmd += ['-c', 'user.name="%s"' % commituser]
+ if commitemail:
+ cmd += ['-c', 'user.email="%s"' % commitemail]
+
+ @staticmethod
+ def prepareCommit(patchfile, commituser=None, commitemail=None):
+ """
+ Prepare a git commit command line based on the header from a patch file
+ (typically this is useful for patches that cannot be applied with "git am" due to formatting)
+ """
+ import tempfile
+ # Process patch header and extract useful information
+ lines = GitApplyTree.extractPatchHeader(patchfile)
+ outlines, author, date, subject = GitApplyTree.interpretPatchHeader(lines)
+ if not author or not subject or not date:
+ try:
+ shellcmd = ["git", "log", "--format=email", "--follow", "--diff-filter=A", "--", patchfile]
+ out = runcmd(["sh", "-c", " ".join(shellcmd)], os.path.dirname(patchfile))
+ except CmdError:
+ out = None
+ if out:
+ _, newauthor, newdate, newsubject = GitApplyTree.interpretPatchHeader(out.splitlines())
+ if not author:
+ # If we're setting the author then the date should be set as well
+ author = newauthor
+ date = newdate
+ elif not date:
+ # If we don't do this we'll get the current date, at least this will be closer
+ date = newdate
+ if not subject:
+ subject = newsubject
+ if subject and outlines and not outlines[0].strip() == subject:
+ outlines.insert(0, '%s\n\n' % subject.strip())
+
+ # Write out commit message to a file
+ with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+ tmpfile = tf.name
+ for line in outlines:
+ tf.write(line)
+ # Prepare git command
+ cmd = ["git"]
+ GitApplyTree.gitCommandUserOptions(cmd, commituser, commitemail)
+ cmd += ["commit", "-F", tmpfile]
+ # git doesn't like plain email addresses as authors
+ if author and '<' in author:
+ cmd.append('--author="%s"' % author)
+ if date:
+ cmd.append('--date="%s"' % date)
+ return (tmpfile, cmd)
+
+ @staticmethod
+ def extractPatches(tree, startcommit, outdir, paths=None):
+ import tempfile
+ import shutil
+ import re
+ tempdir = tempfile.mkdtemp(prefix='oepatch')
+ try:
+ shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
+ if paths:
+ shellcmd.append('--')
+ shellcmd.extend(paths)
+ out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
+ if out:
+ for srcfile in out.split():
+ for encoding in ['utf-8', 'latin-1']:
+ patchlines = []
+ outfile = None
+ try:
+ with open(srcfile, 'r', encoding=encoding) as f:
+ for line in f:
+ checkline = line
+ if checkline.startswith('Subject: '):
+ checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:])
+ if checkline.startswith(GitApplyTree.patch_line_prefix):
+ outfile = line.split()[-1].strip()
+ continue
+ if checkline.startswith(GitApplyTree.ignore_commit_prefix):
+ continue
+ patchlines.append(line)
+ except UnicodeDecodeError:
+ continue
+ break
+ else:
+ raise PatchError('Unable to find a character encoding to decode %s' % srcfile)
+
+ if not outfile:
+ outfile = os.path.basename(srcfile)
+ with open(os.path.join(outdir, outfile), 'w') as of:
+ for line in patchlines:
+ of.write(line)
+ finally:
+ shutil.rmtree(tempdir)
+
+ def _applypatch(self, patch, force = False, reverse = False, run = True):
+ import shutil
+
+ def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
+ if reverse:
+ shellcmd.append('-R')
+
+ shellcmd.append(patch['file'])
+
+ if not run:
+                return ["sh", "-c", " ".join(shellcmd)]
+
+ return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ # Add hooks which add a pointer to the original patch file name in the commit message
+ reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
+ if not reporoot:
+ raise Exception("Cannot get repository root for directory %s" % self.dir)
+ hooks_dir = os.path.join(reporoot, '.git', 'hooks')
+ hooks_dir_backup = hooks_dir + '.devtool-orig'
+ if os.path.lexists(hooks_dir_backup):
+ raise Exception("Git hooks backup directory already exists: %s" % hooks_dir_backup)
+ if os.path.lexists(hooks_dir):
+ shutil.move(hooks_dir, hooks_dir_backup)
+ os.mkdir(hooks_dir)
+ commithook = os.path.join(hooks_dir, 'commit-msg')
+ applyhook = os.path.join(hooks_dir, 'applypatch-msg')
+ with open(commithook, 'w') as f:
+ # NOTE: the formatting here is significant; if you change it you'll also need to
+ # change other places which read it back
+ f.write('echo >> $1\n')
+ f.write('echo "%s: $PATCHFILE" >> $1\n' % GitApplyTree.patch_line_prefix)
+ os.chmod(commithook, 0o755)
+ shutil.copy2(commithook, applyhook)
+ try:
+ patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
+ try:
+ shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot]
+ self.gitCommandUserOptions(shellcmd, self.commituser, self.commitemail)
+ shellcmd += ["am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
+ return _applypatchhelper(shellcmd, patch, force, reverse, run)
+ except CmdError:
+ # Need to abort the git am, or we'll still be within it at the end
+ try:
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "am", "--abort"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ except CmdError:
+ pass
+ # git am won't always clean up after itself, sadly, so...
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "reset", "--hard", "HEAD"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Also need to take care of any stray untracked files
+ shellcmd = ["git", "--work-tree=%s" % reporoot, "clean", "-f"]
+ runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+ # Fall back to git apply
+ shellcmd = ["git", "--git-dir=%s" % reporoot, "apply", "-p%s" % patch['strippath']]
+ try:
+ output = _applypatchhelper(shellcmd, patch, force, reverse, run)
+ except CmdError:
+ # Fall back to patch
+ output = PatchTree._applypatch(self, patch, force, reverse, run)
+ # Add all files
+ shellcmd = ["git", "add", "-f", "-A", "."]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Exclude the patches directory
+ shellcmd = ["git", "reset", "HEAD", self.patchdir]
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ # Commit the result
+ (tmpfile, shellcmd) = self.prepareCommit(patch['file'], self.commituser, self.commitemail)
+ try:
+ shellcmd.insert(0, patchfilevar)
+ output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+ finally:
+ os.remove(tmpfile)
+ return output
+ finally:
+ shutil.rmtree(hooks_dir)
+ if os.path.lexists(hooks_dir_backup):
+ shutil.move(hooks_dir_backup, hooks_dir)
+
+
+class QuiltTree(PatchSet):
+    def _runcmd(self, args, run = True):
+        quiltrc = self.d.getVar('QUILTRCFILE')
+        cmd = ["quilt", "--quiltrc", quiltrc] + args
+        if not run:
+            return cmd
+        runcmd(cmd, self.dir)
+
+ def _quiltpatchpath(self, file):
+ return os.path.join(self.dir, "patches", os.path.basename(file))
+
+
+ def __init__(self, dir, d):
+ PatchSet.__init__(self, dir, d)
+ self.initialized = False
+ p = os.path.join(self.dir, 'patches')
+ if not os.path.exists(p):
+ os.makedirs(p)
+
+ def Clean(self):
+ try:
+ self._runcmd(["pop", "-a", "-f"])
+ oe.path.remove(os.path.join(self.dir, "patches","series"))
+ except Exception:
+ pass
+ self.initialized = True
+
+ def InitFromDir(self):
+ # read series -> self.patches
+ seriespath = os.path.join(self.dir, 'patches', 'series')
+ if not os.path.exists(self.dir):
+ raise NotFoundError(self.dir)
+ if os.path.exists(seriespath):
+ with open(seriespath, 'r') as f:
+ for line in f.readlines():
+ patch = {}
+ parts = line.strip().split()
+ patch["quiltfile"] = self._quiltpatchpath(parts[0])
+ patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
+ if len(parts) > 1:
+ patch["strippath"] = parts[1][2:]
+ self.patches.append(patch)
+
+ # determine which patches are applied -> self._current
+        try:
+            output = runcmd(["quilt", "applied"], self.dir)
+        except CmdError as err:
+            if err.output.strip() == "No patches applied":
+                return
+            else:
+                raise
+ output = [val for val in output.split('\n') if not val.startswith('#')]
+ for patch in self.patches:
+ if os.path.basename(patch["quiltfile"]) == output[-1]:
+ self._current = self.patches.index(patch)
+ self.initialized = True
+
+ def Import(self, patch, force = None):
+ if not self.initialized:
+ self.InitFromDir()
+ PatchSet.Import(self, patch, force)
+ oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
+ with open(os.path.join(self.dir, "patches", "series"), "a") as f:
+ f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"] + "\n")
+ patch["quiltfile"] = self._quiltpatchpath(patch["file"])
+ patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
+
+ # TODO: determine if the file being imported:
+ # 1) is already imported, and is the same
+ # 2) is already imported, but differs
+
+ self.patches.insert(self._current or 0, patch)
+
+
+ def Push(self, force = False, all = False, run = True):
+ # quilt push [-f]
+
+ args = ["push"]
+ if force:
+ args.append("-f")
+ if all:
+ args.append("-a")
+ if not run:
+ return self._runcmd(args, run)
+
+ self._runcmd(args)
+
+ if self._current is not None:
+ self._current = self._current + 1
+ else:
+ self._current = 0
+
+ def Pop(self, force = None, all = None):
+ # quilt pop [-f]
+ args = ["pop"]
+ if force:
+ args.append("-f")
+ if all:
+ args.append("-a")
+
+ self._runcmd(args)
+
+ if self._current == 0:
+ self._current = None
+
+ if self._current is not None:
+ self._current = self._current - 1
+
+ def Refresh(self, **kwargs):
+ if kwargs.get("remote"):
+ patch = self.patches[kwargs["patch"]]
+ if not patch:
+ raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
+            (scheme, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
+            if scheme == "file":
+                import shutil
+                if not patch.get("file") and patch.get("remote"):
+                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
+
+                shutil.copyfile(patch["quiltfile"], patch["file"])
+            else:
+                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), scheme))
+ else:
+ # quilt refresh
+ args = ["refresh"]
+ if kwargs.get("quiltfile"):
+ args.append(os.path.basename(kwargs["quiltfile"]))
+ elif kwargs.get("patch"):
+ args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
+ self._runcmd(args)
+
+class Resolver(object):
+ def __init__(self, patchset, terminal):
+ raise NotImplementedError()
+
+ def Resolve(self):
+ raise NotImplementedError()
+
+ def Revert(self):
+ raise NotImplementedError()
+
+ def Finalize(self):
+ raise NotImplementedError()
+
+class NOOPResolver(Resolver):
+ def __init__(self, patchset, terminal):
+ self.patchset = patchset
+ self.terminal = terminal
+
+    def Resolve(self):
+        olddir = os.path.abspath(os.curdir)
+        os.chdir(self.patchset.dir)
+        try:
+            self.patchset.Push()
+        except Exception:
+            os.chdir(olddir)
+            raise
+        os.chdir(olddir)
+
+# Patch resolver which relies on the user doing all the work involved in the
+# resolution, with the exception of refreshing the remote copy of the patch
+# files (the urls).
+class UserResolver(Resolver):
+ def __init__(self, patchset, terminal):
+ self.patchset = patchset
+ self.terminal = terminal
+
+ # Force a push in the patchset, then drop to a shell for the user to
+ # resolve any rejected hunks
+ def Resolve(self):
+ olddir = os.path.abspath(os.curdir)
+ os.chdir(self.patchset.dir)
+ try:
+ self.patchset.Push(False)
+ except CmdError as v:
+ # Patch application failed
+ patchcmd = self.patchset.Push(True, False, False)
+
+ t = self.patchset.d.getVar('T')
+ if not t:
+ bb.msg.fatal("Build", "T not set")
+ bb.utils.mkdirhier(t)
+ import random
+ rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
+ with open(rcfile, "w") as f:
+ f.write("echo '*** Manual patch resolution mode ***'\n")
+ f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
+ f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
+ f.write("echo ''\n")
+ f.write(" ".join(patchcmd) + "\n")
+ os.chmod(rcfile, 0o775)
+
+ self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)
+
+ # Construct a new PatchSet after the user's changes, compare the
+ # sets, checking patches for modifications, and doing a remote
+ # refresh on each.
+ oldpatchset = self.patchset
+ self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
+
+ for patch in self.patchset.patches:
+ oldpatch = None
+ for opatch in oldpatchset.patches:
+ if opatch["quiltfile"] == patch["quiltfile"]:
+ oldpatch = opatch
+
+ if oldpatch:
+ patch["remote"] = oldpatch["remote"]
+ if patch["quiltfile"] == oldpatch["quiltfile"]:
+ if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
+ bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
+ # user change? remote refresh
+ self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
+ else:
+ # User did not fix the problem. Abort.
+ raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
+ except Exception:
+ os.chdir(olddir)
+ raise
+ os.chdir(olddir)
+
+
+def patch_path(url, fetch, workdir, expand=True):
+ """Return the local path of a patch, or None if this isn't a patch"""
+
+ local = fetch.localpath(url)
+ base, ext = os.path.splitext(os.path.basename(local))
+ if ext in ('.gz', '.bz2', '.xz', '.Z'):
+ if expand:
+ local = os.path.join(workdir, base)
+ ext = os.path.splitext(base)[1]
+
+ urldata = fetch.ud[url]
+ if "apply" in urldata.parm:
+ apply = oe.types.boolean(urldata.parm["apply"])
+ if not apply:
+ return
+ elif ext not in (".diff", ".patch"):
+ return
+
+ return local
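+
+# e.g. "file://0001-fix.patch" yields its local path, while a non-patch
+# source such as "file://defaults.conf" yields None unless ";apply=yes"
+# is set (URLs illustrative).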
+
+def src_patches(d, all=False, expand=True):
+ workdir = d.getVar('WORKDIR')
+ fetch = bb.fetch2.Fetch([], d)
+ patches = []
+ sources = []
+ for url in fetch.urls:
+ local = patch_path(url, fetch, workdir, expand)
+ if not local:
+ if all:
+ local = fetch.localpath(url)
+ sources.append(local)
+ continue
+
+ urldata = fetch.ud[url]
+ parm = urldata.parm
+ patchname = parm.get('pname') or os.path.basename(local)
+
+ apply, reason = should_apply(parm, d)
+ if not apply:
+ if reason:
+ bb.note("Patch %s %s" % (patchname, reason))
+ continue
+
+ patchparm = {'patchname': patchname}
+ if "striplevel" in parm:
+ striplevel = parm["striplevel"]
+ elif "pnum" in parm:
+ #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
+ striplevel = parm["pnum"]
+ else:
+ striplevel = '1'
+ patchparm['striplevel'] = striplevel
+
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ patchparm['patchdir'] = patchdir
+
+ localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
+ patches.append(localurl)
+
+ if all:
+ return sources
+
+ return patches
+
+
+def should_apply(parm, d):
+ if "mindate" in parm or "maxdate" in parm:
+ pn = d.getVar('PN')
+ srcdate = d.getVar('SRCDATE_%s' % pn)
+ if not srcdate:
+ srcdate = d.getVar('SRCDATE')
+
+ if srcdate == "now":
+ srcdate = d.getVar('DATE')
+
+ if "maxdate" in parm and parm["maxdate"] < srcdate:
+ return False, 'is outdated'
+
+ if "mindate" in parm and parm["mindate"] > srcdate:
+ return False, 'is predated'
+
+
+ if "minrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and srcrev < parm["minrev"]:
+ return False, 'applies to later revisions'
+
+ if "maxrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and srcrev > parm["maxrev"]:
+ return False, 'applies to earlier revisions'
+
+ if "rev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and parm["rev"] not in srcrev:
+ return False, "doesn't apply to revision"
+
+ if "notrev" in parm:
+ srcrev = d.getVar('SRCREV')
+ if srcrev and parm["notrev"] in srcrev:
+ return False, "doesn't apply to revision"
+
+ return True, None
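+
+# e.g. a SRC_URI entry "file://fix.patch;maxdate=20180101" (illustrative)
+# is skipped as 'is outdated' once SRCDATE exceeds 20180101.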
+
diff --git a/poky/meta/lib/oe/path.py b/poky/meta/lib/oe/path.py
new file mode 100644
index 000000000..76c58fa76
--- /dev/null
+++ b/poky/meta/lib/oe/path.py
@@ -0,0 +1,261 @@
+import errno
+import glob
+import shutil
+import subprocess
+import os.path
+
+def join(*paths):
+ """Like os.path.join but doesn't treat absolute RHS specially"""
+ return os.path.normpath("/".join(paths))
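+
+# e.g. join("/usr", "/bin") == "/usr/bin", whereas os.path.join("/usr", "/bin")
+# would return "/bin".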
+
+def relative(src, dest):
+ """ Return a relative path from src to dest.
+
+ >>> relative("/usr/bin", "/tmp/foo/bar")
+ ../../tmp/foo/bar
+
+ >>> relative("/usr/bin", "/usr/lib")
+ ../lib
+
+ >>> relative("/tmp", "/tmp/foo/bar")
+ foo/bar
+ """
+
+ return os.path.relpath(dest, src)
+
+def make_relative_symlink(path):
+ """ Convert an absolute symlink to a relative one """
+ if not os.path.islink(path):
+ return
+ link = os.readlink(path)
+ if not os.path.isabs(link):
+ return
+
+ # find the common ancestor directory
+ ancestor = path
+ depth = 0
+ while ancestor and not link.startswith(ancestor):
+ ancestor = ancestor.rpartition('/')[0]
+ depth += 1
+
+ if not ancestor:
+ print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
+ return
+
+ base = link.partition(ancestor)[2].strip('/')
+ while depth > 1:
+ base = "../" + base
+ depth -= 1
+
+ os.remove(path)
+ os.symlink(base, path)
+
+def replace_absolute_symlinks(basedir, d):
+ """
+ Walk basedir looking for absolute symlinks and replacing them with relative ones.
+ The absolute links are assumed to be relative to basedir
+ (compared to make_relative_symlink above which tries to compute common ancestors
+ using pattern matching instead)
+ """
+ for walkroot, dirs, files in os.walk(basedir):
+ for file in files + dirs:
+ path = os.path.join(walkroot, file)
+ if not os.path.islink(path):
+ continue
+ link = os.readlink(path)
+ if not os.path.isabs(link):
+ continue
+ walkdir = os.path.dirname(path.rpartition(basedir)[2])
+ base = os.path.relpath(link, walkdir)
+ bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
+ os.remove(path)
+ os.symlink(base, path)
+
+def format_display(path, metadata):
+ """ Prepare a path for display to the user. """
+ rel = relative(metadata.getVar("TOPDIR"), path)
+ if len(rel) > len(path):
+ return path
+ else:
+ return rel
+
+def copytree(src, dst):
+    # We could use something like shutil.copytree here but it turns out
+    # to be slow. It takes twice as long copying to an empty directory.
+    # If dst already has contents, performance can be 15 times slower.
+    # This way we also preserve hardlinks between files in the tree.
+
+ bb.utils.mkdirhier(dst)
+ cmd = "tar --xattrs --xattrs-include='*' -cf - -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+def copyhardlinktree(src, dst):
+ """ Make the hard link when possible, otherwise copy. """
+ bb.utils.mkdirhier(dst)
+ if os.path.isdir(src) and not len(os.listdir(src)):
+ return
+
+ if (os.stat(src).st_dev == os.stat(dst).st_dev):
+ # Need to copy directories only with tar first since cp will error if two
+ # writers try and create a directory at the same time
+ cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
+ subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+ source = ''
+ if os.path.isdir(src):
+ if len(glob.glob('%s/.??*' % src)) > 0:
+ source = './.??* '
+ source += './*'
+ s_dir = src
+ else:
+ source = src
+ s_dir = os.getcwd()
+ cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst))
+ subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT)
+ else:
+ copytree(src, dst)
+
+def remove(path, recurse=True):
+ """
+ Equivalent to rm -f or rm -rf
+ NOTE: be careful about passing paths that may contain filenames with
+ wildcards in them (as opposed to passing an actual wildcarded path) -
+ since we use glob.glob() to expand the path. Filenames containing
+ square brackets are particularly problematic since the they may not
+ actually expand to match the original filename.
+ """
+ for name in glob.glob(path):
+ try:
+ os.unlink(name)
+ except OSError as exc:
+ if recurse and exc.errno == errno.EISDIR:
+ shutil.rmtree(name)
+ elif exc.errno != errno.ENOENT:
+ raise
+
+def symlink(source, destination, force=False):
+ """Create a symbolic link"""
+ try:
+ if force:
+ remove(destination)
+ os.symlink(source, destination)
+ except OSError as e:
+ if e.errno != errno.EEXIST or os.readlink(destination) != source:
+ raise
+
+def find(dir, **walkoptions):
+ """ Given a directory, recurses into that directory,
+ returning all files as absolute paths. """
+
+ for root, dirs, files in os.walk(dir, **walkoptions):
+ for file in files:
+ yield os.path.join(root, file)
+
+
+## realpath() related functions
+def __is_path_below(file, root):
+ return (file + os.path.sep).startswith(root)
+
+def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
+ """Calculates real path of symlink 'start' + 'rel_path' below
+ 'root'; no part of 'start' below 'root' must contain symlinks. """
+ have_dir = True
+
+ for d in rel_path.split(os.path.sep):
+ if not have_dir and not assume_dir:
+ raise OSError(errno.ENOENT, "no such directory %s" % start)
+
+ if d == os.path.pardir: # '..'
+ if len(start) >= len(root):
+ # do not follow '..' before root
+ start = os.path.dirname(start)
+ else:
+ # emit warning?
+ pass
+ else:
+ (start, have_dir) = __realpath(os.path.join(start, d),
+ root, loop_cnt, assume_dir)
+
+ assert(__is_path_below(start, root))
+
+ return start
+
+def __realpath(file, root, loop_cnt, assume_dir):
+ while os.path.islink(file) and len(file) >= len(root):
+ if loop_cnt == 0:
+ raise OSError(errno.ELOOP, file)
+
+ loop_cnt -= 1
+ target = os.path.normpath(os.readlink(file))
+
+ if not os.path.isabs(target):
+ tdir = os.path.dirname(file)
+ assert(__is_path_below(tdir, root))
+ else:
+ tdir = root
+
+ file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+ try:
+ is_dir = os.path.isdir(file)
+ except:
+        is_dir = False
+
+ return (file, is_dir)
+
+def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+ """ Returns the canonical path of 'file' with assuming a
+ toplevel 'root' directory. When 'use_physdir' is set, all
+ preceding path components of 'file' will be resolved first;
+ this flag should be set unless it is guaranteed that there is
+ no symlink in the path. When 'assume_dir' is not set, missing
+ path components will raise an ENOENT error"""
+
+ root = os.path.normpath(root)
+ file = os.path.normpath(file)
+
+ if not root.endswith(os.path.sep):
+ # letting root end with '/' makes some things easier
+ root = root + os.path.sep
+
+ if not __is_path_below(file, root):
+ raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+ try:
+ if use_physdir:
+ file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+ else:
+ file = __realpath(file, root, loop_cnt, assume_dir)[0]
+ except OSError as e:
+        if e.errno == errno.ELOOP:
+            # make ELOOP more readable; without catching it here, a
+            # backtrace with 100s of OSError exceptions would be printed
+            raise OSError(errno.ELOOP,
+                          "too many recursions while resolving '%s'; loop in '%s'" %
+                          (file, e.strerror))
+
+ raise
+
+ return file
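+
+# Usage sketch (illustrative paths): realpath("/rootfs/usr/bin/sh", "/rootfs")
+# resolves symlinks without escaping /rootfs, so a link to /bin/busybox
+# resolves to /rootfs/bin/busybox.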
+
+def is_path_parent(possible_parent, *paths):
+ """
+ Return True if a path is the parent of another, False otherwise.
+ Multiple paths to test can be specified in which case all
+ specified test paths must be under the parent in order to
+ return True.
+ """
+ def abs_path_trailing(pth):
+ pth_abs = os.path.abspath(pth)
+ if not pth_abs.endswith(os.sep):
+ pth_abs += os.sep
+ return pth_abs
+
+ possible_parent_abs = abs_path_trailing(possible_parent)
+ if not paths:
+ return False
+ for path in paths:
+ path_abs = abs_path_trailing(path)
+ if not path_abs.startswith(possible_parent_abs):
+ return False
+ return True
diff --git a/poky/meta/lib/oe/prservice.py b/poky/meta/lib/oe/prservice.py
new file mode 100644
index 000000000..32dfc15e8
--- /dev/null
+++ b/poky/meta/lib/oe/prservice.py
@@ -0,0 +1,126 @@
+
+def prserv_make_conn(d, check = False):
+ import prserv.serv
+ host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
+ try:
+ conn = None
+ conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+ if check:
+ if not conn.ping():
+ raise Exception('service not available')
+ d.setVar("__PRSERV_CONN",conn)
+ except Exception as exc:
+ bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
+
+ return conn
+
+def prserv_dump_db(d):
+ if not d.getVar('PRSERV_HOST'):
+ bb.error("Not using network based PR service")
+ return None
+
+ conn = d.getVar("__PRSERV_CONN")
+ if conn is None:
+ conn = prserv_make_conn(d)
+ if conn is None:
+            bb.error("Failed to connect to the remote PR service")
+ return None
+
+ #dump db
+ opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
+ opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
+ opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
+ opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
+ return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+
+def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
+ if not d.getVar('PRSERV_HOST'):
+ bb.error("Not using network based PR service")
+ return None
+
+ conn = d.getVar("__PRSERV_CONN")
+ if conn is None:
+ conn = prserv_make_conn(d)
+ if conn is None:
+            bb.error("Failed to connect to the remote PR service")
+ return None
+ #get the entry values
+ imported = []
+ prefix = "PRAUTO$"
+ for v in d.keys():
+ if v.startswith(prefix):
+ (remain, sep, checksum) = v.rpartition('$')
+ (remain, sep, pkgarch) = remain.rpartition('$')
+ (remain, sep, version) = remain.rpartition('$')
+ if (remain + '$' != prefix) or \
+ (filter_version and filter_version != version) or \
+ (filter_pkgarch and filter_pkgarch != pkgarch) or \
+ (filter_checksum and filter_checksum != checksum):
+ continue
+ try:
+ value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
+ except BaseException as exc:
+                bb.debug(1, "Not a valid value of %s: %s" % (v, str(exc)))
+ continue
+ ret = conn.importone(version,pkgarch,checksum,value)
+ if ret != value:
+ bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
+ else:
+ imported.append((version,pkgarch,checksum,value))
+ return imported
+
+def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
+ import bb.utils
+    #initialize the output file
+ bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
+ df = d.getVar('PRSERV_DUMPFILE')
+ #write data
+ lf = bb.utils.lockfile("%s.lock" % df)
+ f = open(df, "a")
+ if metainfo:
+ #dump column info
+        f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver'])
+ f.write("#Table: %s\n" % metainfo['tbl_name'])
+ f.write("#Columns:\n")
+ f.write("#name \t type \t notn \t dflt \t pk\n")
+ f.write("#----------\t --------\t --------\t --------\t ----\n")
+ for i in range(len(metainfo['col_info'])):
+ f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" %
+ (metainfo['col_info'][i]['name'],
+ metainfo['col_info'][i]['type'],
+ metainfo['col_info'][i]['notnull'],
+ metainfo['col_info'][i]['dflt_value'],
+ metainfo['col_info'][i]['pk']))
+ f.write("\n")
+
+ if lockdown:
+ f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
+
+ if datainfo:
+ idx = {}
+ for i in range(len(datainfo)):
+ pkgarch = datainfo[i]['pkgarch']
+ value = datainfo[i]['value']
+ if pkgarch not in idx:
+ idx[pkgarch] = i
+ elif value > datainfo[idx[pkgarch]]['value']:
+ idx[pkgarch] = i
+ f.write("PRAUTO$%s$%s$%s = \"%s\"\n" %
+ (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
+ if not nomax:
+ for i in idx:
+ f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
+ f.close()
+ bb.utils.unlockfile(lf)
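+
+# The dump contains BitBake-parsable assignments such as (illustrative):
+#   PRAUTO$0.1$qemux86$<checksum> = "5"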
+
+def prserv_check_avail(d):
+ host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
+ try:
+ if len(host_params) != 2:
+ raise TypeError
+ else:
+ int(host_params[1])
+ except TypeError:
+ bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
+ else:
+ prserv_make_conn(d, True)
diff --git a/poky/meta/lib/oe/qa.py b/poky/meta/lib/oe/qa.py
new file mode 100644
index 000000000..3231e60ce
--- /dev/null
+++ b/poky/meta/lib/oe/qa.py
@@ -0,0 +1,171 @@
+import os, struct, mmap
+
+class NotELFFileError(Exception):
+ pass
+
+class ELFFile:
+ EI_NIDENT = 16
+
+ EI_CLASS = 4
+ EI_DATA = 5
+ EI_VERSION = 6
+ EI_OSABI = 7
+ EI_ABIVERSION = 8
+
+ E_MACHINE = 0x12
+
+ # possible values for EI_CLASS
+ ELFCLASSNONE = 0
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+
+ # possible value for EI_VERSION
+ EV_CURRENT = 1
+
+ # possible values for EI_DATA
+ EI_DATA_NONE = 0
+ EI_DATA_LSB = 1
+ EI_DATA_MSB = 2
+
+ PT_INTERP = 3
+
+ def my_assert(self, expectation, result):
+ if not expectation == result:
+ #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
+ raise NotELFFileError("%s is not an ELF" % self.name)
+
+ def __init__(self, name):
+ self.name = name
+ self.objdump_output = {}
+
+ # Context Manager functions to close the mmap explicitly
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.data.close()
+
+ def open(self):
+ with open(self.name, "rb") as f:
+ try:
+ self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
+ except ValueError:
+ # This means the file is empty
+ raise NotELFFileError("%s is empty" % self.name)
+
+ # Check the file has the minimum number of ELF table entries
+ if len(self.data) < ELFFile.EI_NIDENT + 4:
+ raise NotELFFileError("%s is not an ELF" % self.name)
+
+ # ELF header
+ self.my_assert(self.data[0], 0x7f)
+ self.my_assert(self.data[1], ord('E'))
+ self.my_assert(self.data[2], ord('L'))
+ self.my_assert(self.data[3], ord('F'))
+ if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32:
+ self.bits = 32
+ elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64:
+ self.bits = 64
+        else:
+            # Neither 32-bit nor 64-bit: bail out
+            raise NotELFFileError("ELF but not 32 or 64 bit.")
+ self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT)
+
+ self.endian = self.data[ELFFile.EI_DATA]
+ if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB):
+ raise NotELFFileError("Unexpected EI_DATA %x" % self.endian)
+
+ def osAbi(self):
+ return self.data[ELFFile.EI_OSABI]
+
+ def abiVersion(self):
+ return self.data[ELFFile.EI_ABIVERSION]
+
+ def abiSize(self):
+ return self.bits
+
+ def isLittleEndian(self):
+ return self.endian == ELFFile.EI_DATA_LSB
+
+ def isBigEndian(self):
+ return self.endian == ELFFile.EI_DATA_MSB
+
+ def getStructEndian(self):
+ return {ELFFile.EI_DATA_LSB: "<",
+ ELFFile.EI_DATA_MSB: ">"}[self.endian]
+
+ def getShort(self, offset):
+ return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0]
+
+ def getWord(self, offset):
+ return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0]
+
+ def isDynamic(self):
+ """
+ Return True if there is a .interp segment (therefore dynamically
+ linked), otherwise False (statically linked).
+ """
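+        # The magic offsets below are e_phoff, e_phentsize and e_phnum in
+        # the ELF header; they differ between ELF32 and ELF64.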
+        offset = self.getWord(0x1C if self.bits == 32 else 0x20)
+        size = self.getShort(0x2A if self.bits == 32 else 0x36)
+        count = self.getShort(0x2C if self.bits == 32 else 0x38)
+
+ for i in range(0, count):
+ p_type = self.getWord(offset + i * size)
+ if p_type == ELFFile.PT_INTERP:
+ return True
+ return False
+
+    def machine(self):
+        """
+        Return the e_machine field; self.endian selects the byte order
+        and E_MACHINE is its offset in the ELF header.
+        """
+        return self.getShort(ELFFile.E_MACHINE)
+
+    def run_objdump(self, cmd, d):
+        import bb.process
+
+ if cmd in self.objdump_output:
+ return self.objdump_output[cmd]
+
+ objdump = d.getVar('OBJDUMP')
+
+ env = os.environ.copy()
+ env["LC_ALL"] = "C"
+ env["PATH"] = d.getVar('PATH')
+
+ try:
+ bb.note("%s %s %s" % (objdump, cmd, self.name))
+ self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
+ return self.objdump_output[cmd]
+ except Exception as e:
+ bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
+ return ""
+
+def elf_machine_to_string(machine):
+ """
+ Return the name of a given ELF e_machine field or the hex value as a string
+ if it isn't recognised.
+ """
+ try:
+ return {
+ 0x02: "SPARC",
+ 0x03: "x86",
+ 0x08: "MIPS",
+ 0x14: "PowerPC",
+ 0x28: "ARM",
+ 0x2A: "SuperH",
+ 0x32: "IA-64",
+ 0x3E: "x86-64",
+ 0xB7: "AArch64"
+ }[machine]
+    except KeyError:
+ return "Unknown (%s)" % repr(machine)
+
+if __name__ == "__main__":
+ import sys
+
+ with ELFFile(sys.argv[1]) as elf:
+ elf.open()
+ print(elf.isDynamic())
diff --git a/poky/meta/lib/oe/recipeutils.py b/poky/meta/lib/oe/recipeutils.py
new file mode 100644
index 000000000..aa64553c0
--- /dev/null
+++ b/poky/meta/lib/oe/recipeutils.py
@@ -0,0 +1,971 @@
+# Utility functions for reading and modifying recipes
+#
+# Some code borrowed from the OE layer index
+#
+# Copyright (C) 2013-2017 Intel Corporation
+#
+
+import sys
+import os
+import os.path
+import tempfile
+import textwrap
+import difflib
+from . import utils
+import shutil
+import re
+import fnmatch
+import glob
+from collections import OrderedDict, defaultdict
+
+
+# Help us to find places to insert values
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
+# Variables that sometimes are a bit long but shouldn't be wrapped
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
+list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
+meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
+
+
+def pn_to_recipe(cooker, pn, mc=''):
+ """Convert a recipe name (PN) to the path to the recipe file"""
+
+ best = cooker.findBestProvider(pn, mc)
+ return best[3]
+
+
+def get_unavailable_reasons(cooker, pn):
+ """If a recipe could not be found, find out why if possible"""
+ import bb.taskdata
+ taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
+ return taskdata.get_reasons(pn)
+
+
+def parse_recipe(cooker, fn, appendfiles):
+ """
+ Parse an individual recipe file, optionally with a list of
+ bbappend files.
+ """
+ import bb.cache
+ parser = bb.cache.NoCache(cooker.databuilder)
+ envdata = parser.loadDataFull(fn, appendfiles)
+ return envdata
+
+
+def get_var_files(fn, varlist, d):
+ """Find the file in which each of a list of variables is set.
+ Note: requires variable history to be enabled when parsing.
+ """
+ varfiles = {}
+ for v in varlist:
+ history = d.varhistory.variable(v)
+ files = []
+ for event in history:
+ if 'file' in event and not 'flag' in event:
+ files.append(event['file'])
+ if files:
+ actualfile = files[-1]
+ else:
+ actualfile = None
+ varfiles[v] = actualfile
+
+ return varfiles
+
+
+def split_var_value(value, assignment=True):
+ """
+ Split a space-separated variable's value into a list of items,
+ taking into account that some of the items might be made up of
+ expressions containing spaces that should not be split.
+ Parameters:
+ value:
+ The string value to split
+ assignment:
+ True to assume that the value represents an assignment
+            statement, False otherwise. If True and an assignment
+            statement is passed in, the first item in the returned list
+            will be the part of the assignment statement up to and
+            including the opening quote character, and the last item
+            will be the closing quote.
+ """
+ inexpr = 0
+ lastchar = None
+ out = []
+ buf = ''
+ for char in value:
+ if char == '{':
+ if lastchar == '$':
+ inexpr += 1
+ elif char == '}':
+ inexpr -= 1
+ elif assignment and char in '"\'' and inexpr == 0:
+ if buf:
+ out.append(buf)
+ out.append(char)
+ char = ''
+ buf = ''
+ elif char.isspace() and inexpr == 0:
+ char = ''
+ if buf:
+ out.append(buf)
+ buf = ''
+ buf += char
+ lastchar = char
+ if buf:
+ out.append(buf)
+
+ # Join together assignment statement and opening quote
+ outlist = out
+ if assignment:
+ assigfound = False
+ for idx, item in enumerate(out):
+ if '=' in item:
+ assigfound = True
+ if assigfound:
+ if '"' in item or "'" in item:
+ outlist = [' '.join(out[:idx+1])]
+ outlist.extend(out[idx+1:])
+ break
+ return outlist
+
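+# Illustrative doctest-style examples for split_var_value(); the SRC_URI
+# entries are hypothetical:
+#   >>> split_var_value('SRC_URI = "file://a.patch file://b.patch"')
+#   ['SRC_URI = "', 'file://a.patch', 'file://b.patch', '"']
+#   >>> split_var_value('${@bb.utils.filter("X", "a b", d)} c', assignment=False)
+#   ['${@bb.utils.filter("X", "a b", d)}', 'c']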
+
+def patch_recipe_lines(fromlines, values, trailing_newline=True):
+ """Update or insert variable values into lines from a recipe.
+ Note that some manual inspection/intervention may be required
+ since this cannot handle all situations.
+ """
+
+ import bb.utils
+
+ if trailing_newline:
+ newline = '\n'
+ else:
+ newline = ''
+
+ nowrap_vars_res = []
+ for item in nowrap_vars:
+ nowrap_vars_res.append(re.compile('^%s$' % item))
+
+ recipe_progression_res = []
+ recipe_progression_restrs = []
+ for item in recipe_progression:
+ if item.endswith('()'):
+ key = item[:-2]
+ else:
+ key = item
+        restr = r'%s(_[a-zA-Z0-9-_$(){}]+|\[[^\]]*\])?' % key
+ if item.endswith('()'):
+ recipe_progression_restrs.append(restr + '()')
+ else:
+ recipe_progression_restrs.append(restr)
+ recipe_progression_res.append(re.compile('^%s$' % restr))
+
+ def get_recipe_pos(variable):
+ for i, p in enumerate(recipe_progression_res):
+ if p.match(variable):
+ return i
+ return -1
+
+ remainingnames = {}
+ for k in values.keys():
+ remainingnames[k] = get_recipe_pos(k)
+ remainingnames = OrderedDict(sorted(remainingnames.items(), key=lambda x: x[1]))
+
+ modifying = False
+
+ def outputvalue(name, lines, rewindcomments=False):
+ if values[name] is None:
+ return
+ rawtext = '%s = "%s"%s' % (name, values[name], newline)
+ addlines = []
+ nowrap = False
+ for nowrap_re in nowrap_vars_res:
+ if nowrap_re.match(name):
+ nowrap = True
+ break
+ if nowrap:
+ addlines.append(rawtext)
+ elif name in list_vars:
+ splitvalue = split_var_value(values[name], assignment=False)
+ if len(splitvalue) > 1:
+ linesplit = ' \\\n' + (' ' * (len(name) + 4))
+ addlines.append('%s = "%s%s"%s' % (name, linesplit.join(splitvalue), linesplit, newline))
+ else:
+ addlines.append(rawtext)
+ else:
+ wrapped = textwrap.wrap(rawtext)
+ for wrapline in wrapped[:-1]:
+ addlines.append('%s \\%s' % (wrapline, newline))
+ addlines.append('%s%s' % (wrapped[-1], newline))
+
+ # Split on newlines - this isn't strictly necessary if you are only
+ # going to write the output to disk, but if you want to compare it
+ # (as patch_recipe_file() will do if patch=True) then it's important.
+ addlines = [line for l in addlines for line in l.splitlines(True)]
+ if rewindcomments:
+ # Ensure we insert the lines before any leading comments
+ # (that we'd want to ensure remain leading the next value)
+ for i, ln in reversed(list(enumerate(lines))):
+ if not ln.startswith('#'):
+ lines[i+1:i+1] = addlines
+ break
+ else:
+ lines.extend(addlines)
+ else:
+ lines.extend(addlines)
+
+ existingnames = []
+ def patch_recipe_varfunc(varname, origvalue, op, newlines):
+ if modifying:
+ # Insert anything that should come before this variable
+ pos = get_recipe_pos(varname)
+ for k in list(remainingnames):
+ if remainingnames[k] > -1 and pos >= remainingnames[k] and not k in existingnames:
+ outputvalue(k, newlines, rewindcomments=True)
+ del remainingnames[k]
+ # Now change this variable, if it needs to be changed
+ if varname in existingnames and op in ['+=', '=', '=+']:
+ if varname in remainingnames:
+ outputvalue(varname, newlines)
+ del remainingnames[varname]
+ return None, None, 0, True
+ else:
+ if varname in values:
+ existingnames.append(varname)
+ return origvalue, None, 0, True
+
+    # First run - establish which of the values we want to set are already in the file
+ varlist = [re.escape(item) for item in values.keys()]
+ bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc)
+ # Second run - actually set everything
+ modifying = True
+ varlist.extend(recipe_progression_restrs)
+ changed, tolines = bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc, match_overrides=True)
+
+ if remainingnames:
+ if tolines and tolines[-1].strip() != '':
+ tolines.append('\n')
+ for k in remainingnames.keys():
+ outputvalue(k, tolines)
+
+ return changed, tolines
+
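+# Illustrative example for patch_recipe_lines() with hypothetical input;
+# PV comes after LICENSE in recipe_progression, so the new value is
+# appended after the existing content:
+#   >>> _, lines = patch_recipe_lines(['LICENSE = "MIT"\n'], {'PV': '1.0'})
+#   >>> lines
+#   ['LICENSE = "MIT"\n', '\n', 'PV = "1.0"\n']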
+
+def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
+ """Update or insert variable values into a recipe file (assuming you
+ have already identified the exact file you want to update.)
+ Note that some manual inspection/intervention may be required
+ since this cannot handle all situations.
+ """
+
+ with open(fn, 'r') as f:
+ fromlines = f.readlines()
+
+ _, tolines = patch_recipe_lines(fromlines, values)
+
+ if redirect_output:
+ with open(os.path.join(redirect_output, os.path.basename(fn)), 'w') as f:
+ f.writelines(tolines)
+ return None
+ elif patch:
+ relfn = os.path.relpath(fn, relpath)
+ diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
+ return diff
+ else:
+ with open(fn, 'w') as f:
+ f.writelines(tolines)
+ return None
+
+
+def localise_file_vars(fn, varfiles, varlist):
+ """Given a list of variables and variable history (fetched with get_var_files())
+ find where each variable should be set/changed. This handles for example where a
+ recipe includes an inc file where variables might be changed - in most cases
+ we want to update the inc file when changing the variable value rather than adding
+ it to the recipe itself.
+ """
+ fndir = os.path.dirname(fn) + os.sep
+
+ first_meta_file = None
+ for v in meta_vars:
+ f = varfiles.get(v, None)
+ if f:
+ actualdir = os.path.dirname(f) + os.sep
+ if actualdir.startswith(fndir):
+ first_meta_file = f
+ break
+
+ filevars = defaultdict(list)
+ for v in varlist:
+ f = varfiles[v]
+ # Only return files that are in the same directory as the recipe or in some directory below there
+ # (this excludes bbclass files and common inc files that wouldn't be appropriate to set the variable
+ # in if we were going to set a value specific to this recipe)
+ if f:
+ actualfile = f
+ else:
+ # Variable isn't in a file, if it's one of the "meta" vars, use the first file with a meta var in it
+ if first_meta_file:
+ actualfile = first_meta_file
+ else:
+ actualfile = fn
+
+ actualdir = os.path.dirname(actualfile) + os.sep
+ if not actualdir.startswith(fndir):
+ actualfile = fn
+ filevars[actualfile].append(v)
+
+ return filevars
+
+def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
+ """Modify a list of variable values in the specified recipe. Handles inc files if
+ used by the recipe.
+ """
+ varlist = varvalues.keys()
+ varfiles = get_var_files(fn, varlist, d)
+ locs = localise_file_vars(fn, varfiles, varlist)
+ patches = []
+ for f,v in locs.items():
+ vals = {k: varvalues[k] for k in v}
+ patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
+ if patch:
+ patches.append(patchdata)
+
+ if patch:
+ return patches
+ else:
+ return None
+
+
+
+def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True, all_variants=False):
+ """Copy (local) recipe files, including both files included via include/require,
+ and files referred to in the SRC_URI variable."""
+ import bb.fetch2
+ import oe.path
+
+ # FIXME need a warning if the unexpanded SRC_URI value contains variable references
+
+ uri_values = []
+ localpaths = []
+ def fetch_urls(rdata):
+ # Collect the local paths from SRC_URI
+ srcuri = rdata.getVar('SRC_URI') or ""
+ if srcuri not in uri_values:
+ fetch = bb.fetch2.Fetch(srcuri.split(), rdata)
+ if download:
+ fetch.download()
+ for pth in fetch.localpaths():
+ if pth not in localpaths:
+ localpaths.append(pth)
+ uri_values.append(srcuri)
+
+ fetch_urls(d)
+ if all_variants:
+ # Get files for other variants e.g. in the case of a SRC_URI_append
+ localdata = bb.data.createCopy(d)
+ variants = (localdata.getVar('BBCLASSEXTEND') or '').split()
+ if variants:
+ # Ensure we handle class-target if we're dealing with one of the variants
+ variants.append('target')
+ for variant in variants:
+ localdata.setVar('CLASSOVERRIDE', 'class-%s' % variant)
+ fetch_urls(localdata)
+
+ # Copy local files to target directory and gather any remote files
+ bb_dir = os.path.abspath(os.path.dirname(d.getVar('FILE'))) + os.sep
+ remotes = []
+ copied = []
+ # Need to do this in two steps since we want to check against the absolute path
+ includes = [os.path.abspath(path) for path in d.getVar('BBINCLUDED').split() if os.path.exists(path)]
+ # We also check this below, but we don't want any items in this list being considered remotes
+ includes = [path for path in includes if path.startswith(bb_dir)]
+ for path in localpaths + includes:
+ # Only import files that are under the meta directory
+ if path.startswith(bb_dir):
+ if not whole_dir:
+ relpath = os.path.relpath(path, bb_dir)
+ subdir = os.path.join(tgt_dir, os.path.dirname(relpath))
+ if not os.path.exists(subdir):
+ os.makedirs(subdir)
+ shutil.copy2(path, os.path.join(tgt_dir, relpath))
+ copied.append(relpath)
+ else:
+ remotes.append(path)
+ # Simply copy whole meta dir, if requested
+ if whole_dir:
+ shutil.copytree(bb_dir, tgt_dir)
+
+ return copied, remotes
+
+
+def get_recipe_local_files(d, patches=False, archives=False):
+ """Get a list of local files in SRC_URI within a recipe."""
+ import oe.patch
+ uris = (d.getVar('SRC_URI') or "").split()
+ fetch = bb.fetch2.Fetch(uris, d)
+ # FIXME this list should be factored out somewhere else (such as the
+ # fetcher) though note that this only encompasses actual container formats
+    # i.e. those that can contain multiple files, as opposed to those that
+    # only contain a compressed stream (e.g. .tar.gz as opposed to just .gz)
+ archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
+ ret = {}
+ for uri in uris:
+ if fetch.ud[uri].type == 'file':
+ if (not patches and
+ oe.patch.patch_path(uri, fetch, '', expand=False)):
+ continue
+ # Skip files that are referenced by absolute path
+ fname = fetch.ud[uri].basepath
+ if os.path.isabs(fname):
+ continue
+ # Handle subdir=
+ subdir = fetch.ud[uri].parm.get('subdir', '')
+ if subdir:
+ if os.path.isabs(subdir):
+ continue
+ fname = os.path.join(subdir, fname)
+ localpath = fetch.localpath(uri)
+ if not archives:
+ # Ignore archives that will be unpacked
+ if localpath.endswith(tuple(archive_exts)):
+ unpack = fetch.ud[uri].parm.get('unpack', True)
+ if unpack:
+ continue
+ ret[fname] = localpath
+ return ret
+
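+# Illustrative shape of the dict returned by get_recipe_local_files() for a
+# hypothetical recipe with SRC_URI = "file://defaults.conf":
+#   {'defaults.conf': '/path/to/layer/recipes-foo/foo/defaults.conf'}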
+
+def get_recipe_patches(d):
+ """Get a list of the patches included in SRC_URI within a recipe."""
+ import oe.patch
+ patches = oe.patch.src_patches(d, expand=False)
+ patchfiles = []
+ for patch in patches:
+ _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
+ patchfiles.append(local)
+ return patchfiles
+
+
+def get_recipe_patched_files(d):
+ """
+ Get the list of patches for a recipe along with the files each patch modifies.
+ Params:
+ d: the datastore for the recipe
+ Returns:
+ a dict mapping patch file path to a list of tuples of changed files and
+ change mode ('A' for add, 'D' for delete or 'M' for modify)
+ """
+ import oe.patch
+ patches = oe.patch.src_patches(d, expand=False)
+ patchedfiles = {}
+ for patch in patches:
+ _, _, patchfile, _, _, parm = bb.fetch.decodeurl(patch)
+ striplevel = int(parm['striplevel'])
+ patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S'), parm.get('patchdir', '')))
+ return patchedfiles
+
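+# Illustrative shape of the dict returned by get_recipe_patched_files() for
+# a hypothetical patch that modifies one file and adds another:
+#   {'/path/to/0001-fix.patch': [('src/main.c', 'M'), ('src/new.c', 'A')]}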
+
+def validate_pn(pn):
+ """Perform validation on a recipe name (PN) for a new recipe."""
+ reserved_names = ['forcevariable', 'append', 'prepend', 'remove']
+ if not re.match('^[0-9a-z-.+]+$', pn):
+ return 'Recipe name "%s" is invalid: only characters 0-9, a-z, -, + and . are allowed' % pn
+ elif pn in reserved_names:
+ return 'Recipe name "%s" is invalid: is a reserved keyword' % pn
+ elif pn.startswith('pn-'):
+ return 'Recipe name "%s" is invalid: names starting with "pn-" are reserved' % pn
+ elif pn.endswith(('.bb', '.bbappend', '.bbclass', '.inc', '.conf')):
+ return 'Recipe name "%s" is invalid: should be just a name, not a file name' % pn
+ return ''
+
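+# Illustrative doctest-style examples for validate_pn():
+#   >>> validate_pn('zlib')
+#   ''
+#   >>> validate_pn('Zlib')
+#   'Recipe name "Zlib" is invalid: only characters 0-9, a-z, -, + and . are allowed'
+#   >>> validate_pn('append')
+#   'Recipe name "append" is invalid: is a reserved keyword'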
+
+def get_bbfile_path(d, destdir, extrapathhint=None):
+ """
+ Determine the correct path for a recipe within a layer
+ Parameters:
+ d: Recipe-specific datastore
+ destdir: destination directory. Can be the path to the base of the layer or a
+ partial path somewhere within the layer.
+ extrapathhint: a path relative to the base of the layer to try
+ """
+ import bb.cookerdata
+
+ destdir = os.path.abspath(destdir)
+ destlayerdir = find_layerdir(destdir)
+
+ # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
+ confdata = d.createCopy()
+ confdata.setVar('BBFILES', '')
+ confdata.setVar('LAYERDIR', destlayerdir)
+ destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
+ confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
+ pn = d.getVar('PN')
+
+ bbfilespecs = (confdata.getVar('BBFILES') or '').split()
+ if destdir == destlayerdir:
+ for bbfilespec in bbfilespecs:
+ if not bbfilespec.endswith('.bbappend'):
+ for match in glob.glob(bbfilespec):
+ splitext = os.path.splitext(os.path.basename(match))
+ if splitext[1] == '.bb':
+ mpn = splitext[0].split('_')[0]
+ if mpn == pn:
+ return os.path.dirname(match)
+
+ # Try to make up a path that matches BBFILES
+ # this is a little crude, but better than nothing
+ bpn = d.getVar('BPN')
+ recipefn = os.path.basename(d.getVar('FILE'))
+ pathoptions = [destdir]
+ if extrapathhint:
+ pathoptions.append(os.path.join(destdir, extrapathhint))
+ if destdir == destlayerdir:
+ pathoptions.append(os.path.join(destdir, 'recipes-%s' % bpn, bpn))
+ pathoptions.append(os.path.join(destdir, 'recipes', bpn))
+ pathoptions.append(os.path.join(destdir, bpn))
+ elif not destdir.endswith(('/' + pn, '/' + bpn)):
+ pathoptions.append(os.path.join(destdir, bpn))
+ for pathoption in pathoptions:
+ bbfilepath = os.path.join(pathoption, 'test.bb')
+ for bbfilespec in bbfilespecs:
+ if fnmatch.fnmatchcase(bbfilepath, bbfilespec):
+ return pathoption
+ return None
+
+def get_bbappend_path(d, destlayerdir, wildcardver=False):
+ """Determine how a bbappend for a recipe should be named and located within another layer"""
+
+ import bb.cookerdata
+
+ destlayerdir = os.path.abspath(destlayerdir)
+ recipefile = d.getVar('FILE')
+ recipefn = os.path.splitext(os.path.basename(recipefile))[0]
+ if wildcardver and '_' in recipefn:
+ recipefn = recipefn.split('_', 1)[0] + '_%'
+ appendfn = recipefn + '.bbappend'
+
+ # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
+ confdata = d.createCopy()
+ confdata.setVar('BBFILES', '')
+ confdata.setVar('LAYERDIR', destlayerdir)
+ destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
+ confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
+
+ origlayerdir = find_layerdir(recipefile)
+ if not origlayerdir:
+ return (None, False)
+ # Now join this to the path where the bbappend is going and check if it is covered by BBFILES
+ appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
+ closepath = ''
+ pathok = True
+ for bbfilespec in confdata.getVar('BBFILES').split():
+ if fnmatch.fnmatchcase(appendpath, bbfilespec):
+ # Our append path works, we're done
+ break
+ elif bbfilespec.startswith(destlayerdir) and fnmatch.fnmatchcase('test.bbappend', os.path.basename(bbfilespec)):
+ # Try to find the longest matching path
+ if len(bbfilespec) > len(closepath):
+ closepath = bbfilespec
+ else:
+ # Unfortunately the bbappend layer and the original recipe's layer don't have the same structure
+ if closepath:
+ # bbappend layer's layer.conf at least has a spec that picks up .bbappend files
+ # Now we just need to substitute out any wildcards
+ appendsubdir = os.path.relpath(os.path.dirname(closepath), destlayerdir)
+ if 'recipes-*' in appendsubdir:
+ # Try to copy this part from the original recipe path
+ res = re.search('/recipes-[^/]+/', recipefile)
+ if res:
+ appendsubdir = appendsubdir.replace('/recipes-*/', res.group(0))
+ # This is crude, but we have to do something
+ appendsubdir = appendsubdir.replace('*', recipefn.split('_')[0])
+ appendsubdir = appendsubdir.replace('?', 'a')
+ appendpath = os.path.join(destlayerdir, appendsubdir, appendfn)
+ else:
+ pathok = False
+ return (appendpath, pathok)
+
+
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
+ """
+ Writes a bbappend file for a recipe
+ Parameters:
+ rd: data dictionary for the recipe
+ destlayerdir: base directory of the layer to place the bbappend in
+ (subdirectory path from there will be determined automatically)
+        srcfiles: dict of source files to add to SRC_URI, where the key
+ is the full path to the file to be added, and the value is the
+ original filename as it would appear in SRC_URI or None if it
+ isn't already present. You may pass None for this parameter if
+ you simply want to specify your own content via the extralines
+ parameter.
+ install: dict mapping entries in srcfiles to a tuple of two elements:
+ install path (*without* ${D} prefix) and permission value (as a
+ string, e.g. '0644').
+ wildcardver: True to use a % wildcard in the bbappend filename, or
+ False to make the bbappend specific to the recipe version.
+ machine:
+ If specified, make the changes in the bbappend specific to this
+ machine. This will also cause PACKAGE_ARCH = "${MACHINE_ARCH}"
+ to be added to the bbappend.
+ extralines:
+ Extra lines to add to the bbappend. This may be a dict of name
+ value pairs, or simply a list of the lines.
+ removevalues:
+ Variable values to remove - a dict of names/values.
+ redirect_output:
+ If specified, redirects writing the output file to the
+ specified directory (for dry-run purposes)
+ """
+
+ if not removevalues:
+ removevalues = {}
+
+ # Determine how the bbappend should be named
+ appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
+ if not appendpath:
+        bb.error('Unable to determine layer directory containing %s' % rd.getVar('FILE'))
+ return (None, None)
+ if not pathok:
+ bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
+
+ appenddir = os.path.dirname(appendpath)
+ if not redirect_output:
+ bb.utils.mkdirhier(appenddir)
+
+ # FIXME check if the bbappend doesn't get overridden by a higher priority layer?
+
+ layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
+ if not os.path.abspath(destlayerdir) in layerdirs:
+ bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
+
+ bbappendlines = []
+ if extralines:
+ if isinstance(extralines, dict):
+ for name, value in extralines.items():
+ bbappendlines.append((name, '=', value))
+ else:
+ # Do our best to split it
+ for line in extralines:
+ if line[-1] == '\n':
+ line = line[:-1]
+ splitline = line.split(None, 2)
+ if len(splitline) == 3:
+ bbappendlines.append(tuple(splitline))
+ else:
+ raise Exception('Invalid extralines value passed')
+
+ def popline(varname):
+ for i in range(0, len(bbappendlines)):
+ if bbappendlines[i][0] == varname:
+ line = bbappendlines.pop(i)
+ return line
+ return None
+
+ def appendline(varname, op, value):
+ for i in range(0, len(bbappendlines)):
+ item = bbappendlines[i]
+ if item[0] == varname:
+ bbappendlines[i] = (item[0], item[1], item[2] + ' ' + value)
+ break
+ else:
+ bbappendlines.append((varname, op, value))
+
+ destsubdir = rd.getVar('PN')
+ if srcfiles:
+ bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
+
+ appendoverride = ''
+ if machine:
+ bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
+ appendoverride = '_%s' % machine
+ copyfiles = {}
+ if srcfiles:
+ instfunclines = []
+ for newfile, origsrcfile in srcfiles.items():
+ srcfile = origsrcfile
+ srcurientry = None
+ if not srcfile:
+ srcfile = os.path.basename(newfile)
+ srcurientry = 'file://%s' % srcfile
+ # Double-check it's not there already
+ # FIXME do we care if the entry is added by another bbappend that might go away?
+ if not srcurientry in rd.getVar('SRC_URI').split():
+ if machine:
+ appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
+ else:
+ appendline('SRC_URI', '+=', srcurientry)
+ copyfiles[newfile] = srcfile
+ if install:
+ institem = install.pop(newfile, None)
+ if institem:
+ (destpath, perms) = institem
+ instdestpath = replace_dir_vars(destpath, rd)
+ instdirline = 'install -d ${D}%s' % os.path.dirname(instdestpath)
+ if not instdirline in instfunclines:
+ instfunclines.append(instdirline)
+ instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
+ if instfunclines:
+ bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
+
+ if redirect_output:
+ bb.note('Writing append file %s (dry-run)' % appendpath)
+ outfile = os.path.join(redirect_output, os.path.basename(appendpath))
+ # Only take a copy if the file isn't already there (this function may be called
+ # multiple times per operation when we're handling overrides)
+ if os.path.exists(appendpath) and not os.path.exists(outfile):
+ shutil.copy2(appendpath, outfile)
+ else:
+ bb.note('Writing append file %s' % appendpath)
+ outfile = appendpath
+
+ if os.path.exists(outfile):
+ # Work around lack of nonlocal in python 2
+ extvars = {'destsubdir': destsubdir}
+
+ def appendfile_varfunc(varname, origvalue, op, newlines):
+ if varname == 'FILESEXTRAPATHS_prepend':
+ if origvalue.startswith('${THISDIR}/'):
+ popline('FILESEXTRAPATHS_prepend')
+ extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
+ elif varname == 'PACKAGE_ARCH':
+ if machine:
+ popline('PACKAGE_ARCH')
+ return (machine, None, 4, False)
+ elif varname.startswith('do_install_append'):
+ func = popline(varname)
+ if func:
+ instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
+ for line in func[2]:
+ if not line in instfunclines:
+ instfunclines.append(line)
+ return (instfunclines, None, 4, False)
+ else:
+ splitval = split_var_value(origvalue, assignment=False)
+ changed = False
+ removevar = varname
+ if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
+ removevar = 'SRC_URI'
+ line = popline(varname)
+ if line:
+ if line[2] not in splitval:
+ splitval.append(line[2])
+ changed = True
+ else:
+ line = popline(varname)
+ if line:
+ splitval = [line[2]]
+ changed = True
+
+ if removevar in removevalues:
+ remove = removevalues[removevar]
+ if isinstance(remove, str):
+ if remove in splitval:
+ splitval.remove(remove)
+ changed = True
+ else:
+ for removeitem in remove:
+ if removeitem in splitval:
+ splitval.remove(removeitem)
+ changed = True
+
+ if changed:
+ newvalue = splitval
+ if len(newvalue) == 1:
+ # Ensure it's written out as one line
+ if '_append' in varname:
+ newvalue = ' ' + newvalue[0]
+ else:
+ newvalue = newvalue[0]
+ if not newvalue and (op in ['+=', '.='] or '_append' in varname):
+ # There's no point appending nothing
+ newvalue = None
+ if varname.endswith('()'):
+ indent = 4
+ else:
+ indent = -1
+ return (newvalue, None, indent, True)
+ return (origvalue, None, 4, False)
+
+ varnames = [item[0] for item in bbappendlines]
+ if removevalues:
+ varnames.extend(list(removevalues.keys()))
+
+ with open(outfile, 'r') as f:
+ (updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
+
+ destsubdir = extvars['destsubdir']
+ else:
+ updated = False
+ newlines = []
+
+ if bbappendlines:
+ for line in bbappendlines:
+ if line[0].endswith('()'):
+ newlines.append('%s {\n %s\n}\n' % (line[0], '\n '.join(line[2])))
+ else:
+ newlines.append('%s %s "%s"\n\n' % line)
+ updated = True
+
+ if updated:
+ with open(outfile, 'w') as f:
+ f.writelines(newlines)
+
+ if copyfiles:
+ if machine:
+ destsubdir = os.path.join(destsubdir, machine)
+ if redirect_output:
+ outdir = redirect_output
+ else:
+ outdir = appenddir
+ for newfile, srcfile in copyfiles.items():
+ filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
+ if os.path.abspath(newfile) != os.path.abspath(filedest):
+ if newfile.startswith(tempfile.gettempdir()):
+ newfiledisp = os.path.basename(newfile)
+ else:
+ newfiledisp = newfile
+ if redirect_output:
+ bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
+ else:
+ bb.note('Copying %s to %s' % (newfiledisp, filedest))
+ bb.utils.mkdirhier(os.path.dirname(filedest))
+ shutil.copyfile(newfile, filedest)
+
+ return (appendpath, os.path.join(appenddir, destsubdir))
+
+
+def find_layerdir(fn):
+ """ Figure out the path to the base of the layer containing a file (e.g. a recipe)"""
+ pth = os.path.abspath(fn)
+ layerdir = ''
+ while pth:
+ if os.path.exists(os.path.join(pth, 'conf', 'layer.conf')):
+ layerdir = pth
+ break
+ pth = os.path.dirname(pth)
+ if pth == '/':
+ return None
+ return layerdir
+
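+# Illustrative example for find_layerdir(), assuming a hypothetical layout
+# where /srv/poky/meta/conf/layer.conf exists:
+#   >>> find_layerdir('/srv/poky/meta/recipes-core/zlib/zlib_1.2.11.bb')
+#   '/srv/poky/meta'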
+
+def replace_dir_vars(path, d):
+ """Replace common directory paths with appropriate variable references (e.g. /etc becomes ${sysconfdir})"""
+ dirvars = {}
+ # Sort by length so we get the variables we're interested in first
+ for var in sorted(list(d.keys()), key=len):
+ if var.endswith('dir') and var.lower() == var:
+ value = d.getVar(var)
+ if value.startswith('/') and not '\n' in value and value not in dirvars:
+ dirvars[value] = var
+ for dirpath in sorted(list(dirvars.keys()), reverse=True):
+ path = path.replace(dirpath, '${%s}' % dirvars[dirpath])
+ return path
+
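+# Illustrative example for replace_dir_vars(), assuming the datastore d maps
+# sysconfdir to /etc:
+#   >>> replace_dir_vars('/etc/init.d/myservice', d)
+#   '${sysconfdir}/init.d/myservice'
+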
+def get_recipe_pv_without_srcpv(pv, uri_type):
+ """
+    Get PV without the SRCPV suffix commonly used with SCMs; for now
+    only git is supported.
+
+    Returns a tuple of (pv, prefix, suffix).
+ """
+ pfx = ''
+ sfx = ''
+
+ if uri_type == 'git':
+        git_regex = re.compile(r"(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
+ m = git_regex.match(pv)
+
+ if m:
+ pv = m.group('ver')
+ pfx = m.group('pfx')
+ sfx = m.group('sfx')
+ else:
+ regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
+ m = regex.match(pv)
+ if m:
+ pv = m.group('ver')
+ pfx = m.group('pfx')
+
+ return (pv, pfx, sfx)
+
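+# Illustrative doctest-style examples for get_recipe_pv_without_srcpv():
+#   >>> get_recipe_pv_without_srcpv('1.2.3+gitAUTOINC+f4c1a2b', 'git')
+#   ('1.2.3', '', '+gitAUTOINC+')
+#   >>> get_recipe_pv_without_srcpv('v1.0', 'https')
+#   ('1.0', 'v', '')
+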
+def get_recipe_upstream_version(rd):
+ """
+ Get upstream version of recipe using bb.fetch2 methods with support for
+ http, https, ftp and git.
+
+    bb.fetch2 exceptions can be raised:
+        FetchError when there is no network access or the upstream site
+        doesn't respond.
+        NoMethodError when the URI's latest_versionstring method isn't
+        implemented.
+
+    Returns a dictionary with version, repository revision, current_version,
+    type and datetime. Type can be A for Automatic, M for Manual and U for
+    Unknown.
+ """
+ from bb.fetch2 import decodeurl
+ from datetime import datetime
+
+ ru = {}
+ ru['current_version'] = rd.getVar('PV')
+ ru['version'] = ''
+ ru['type'] = 'U'
+ ru['datetime'] = ''
+ ru['revision'] = ''
+
+    # XXX: If there is no SRC_URI then there are no upstream sources, so
+    # return the current recipe version so that the upstream version check
+    # declares a match.
+ src_uris = rd.getVar('SRC_URI')
+ if not src_uris:
+ ru['version'] = ru['current_version']
+ ru['type'] = 'M'
+ ru['datetime'] = datetime.now()
+ return ru
+
+    # XXX: we assume that the first entry points to the upstream sources
+ src_uri = src_uris.split()[0]
+ uri_type, _, _, _, _, _ = decodeurl(src_uri)
+
+ (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+ ru['current_version'] = pv
+
+ manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
+ if manual_upstream_version:
+ # manual tracking of upstream version.
+ ru['version'] = manual_upstream_version
+ ru['type'] = 'M'
+
+ manual_upstream_date = rd.getVar("CHECK_DATE")
+ if manual_upstream_date:
+ date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
+ else:
+ date = datetime.now()
+ ru['datetime'] = date
+
+ elif uri_type == "file":
+ # files are always up-to-date
+ ru['version'] = pv
+ ru['type'] = 'A'
+ ru['datetime'] = datetime.now()
+ else:
+ ud = bb.fetch2.FetchData(src_uri, rd)
+ if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+ revision = ud.method.latest_revision(ud, rd, 'default')
+ upversion = pv
+ if revision != rd.getVar("SRCREV"):
+ upversion = upversion + "-new-commits-available"
+ else:
+ pupver = ud.method.latest_versionstring(ud, rd)
+ (upversion, revision) = pupver
+
+ if upversion:
+ ru['version'] = upversion
+ ru['type'] = 'A'
+
+ if revision:
+ ru['revision'] = revision
+
+ ru['datetime'] = datetime.now()
+
+ return ru
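+
+# Illustrative shape of the dict returned by get_recipe_upstream_version()
+# (hypothetical values):
+#   {'current_version': '1.2.3', 'version': '1.2.4', 'type': 'A',
+#    'revision': '', 'datetime': datetime.datetime(2018, 8, 14, 10, 5)}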
diff --git a/poky/meta/lib/oe/rootfs.py b/poky/meta/lib/oe/rootfs.py
new file mode 100644
index 000000000..f8f717c05
--- /dev/null
+++ b/poky/meta/lib/oe/rootfs.py
@@ -0,0 +1,973 @@
+from abc import ABCMeta, abstractmethod
+from oe.utils import execute_pre_post_process
+from oe.package_manager import *
+from oe.manifest import *
+import oe.path
+import filecmp
+import shutil
+import os
+import subprocess
+import re
+
+
+class Rootfs(object, metaclass=ABCMeta):
+ """
+ This is an abstract class. Do not instantiate this directly.
+ """
+
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ self.d = d
+ self.pm = None
+ self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
+ self.deploydir = self.d.getVar('IMGDEPLOYDIR')
+ self.progress_reporter = progress_reporter
+ self.logcatcher = logcatcher
+
+ self.install_order = Manifest.INSTALL_ORDER
+
+ @abstractmethod
+ def _create(self):
+ pass
+
+ @abstractmethod
+ def _get_delayed_postinsts(self):
+ pass
+
+ @abstractmethod
+ def _save_postinsts(self):
+ pass
+
+ @abstractmethod
+ def _log_check(self):
+ pass
+
+ def _log_check_common(self, type, match):
+ # Ignore any lines containing log_check to avoid recursion, and ignore
+ # lines beginning with a + since sh -x may emit code which isn't
+ # actually executed, but may contain error messages
+ excludes = [ 'log_check', r'^\+' ]
+ if hasattr(self, 'log_check_expected_regexes'):
+ excludes.extend(self.log_check_expected_regexes)
+ excludes = [re.compile(x) for x in excludes]
+ r = re.compile(match)
+ log_path = self.d.expand("${T}/log.do_rootfs")
+ messages = []
+ with open(log_path, 'r') as log:
+ for line in log:
+ if self.logcatcher and self.logcatcher.contains(line.rstrip()):
+ continue
+ for ee in excludes:
+ m = ee.search(line)
+ if m:
+ break
+ if m:
+ continue
+
+ m = r.search(line)
+ if m:
+ messages.append('[log_check] %s' % line)
+ if messages:
+ if len(messages) == 1:
+ msg = '1 %s message' % type
+ else:
+ msg = '%d %s messages' % (len(messages), type)
+ msg = '[log_check] %s: found %s in the logfile:\n%s' % \
+ (self.d.getVar('PN'), msg, ''.join(messages))
+ if type == 'error':
+ bb.fatal(msg)
+ else:
+ bb.warn(msg)
+
+ def _log_check_warn(self):
+ self._log_check_common('warning', '^(warn|Warn|WARNING:)')
+
+ def _log_check_error(self):
+ self._log_check_common('error', self.log_check_regex)
+
+ def _insert_feed_uris(self):
+ if bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d):
+ self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
+ self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
+ self.d.getVar('PACKAGE_FEED_ARCHS'))
+
+
+ """
+ The _cleanup() method should be used to clean-up stuff that we don't really
+ want to end up on target. For example, in the case of RPM, the DB locks.
+    The method is called once, at the end of the create() method.
+ """
+ @abstractmethod
+ def _cleanup(self):
+ pass
+
+ def _setup_dbg_rootfs(self, dirs):
+ gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
+ if gen_debugfs != '1':
+ return
+
+ bb.note(" Renaming the original rootfs...")
+ try:
+ shutil.rmtree(self.image_rootfs + '-orig')
+        except OSError:
+ pass
+ os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+
+ bb.note(" Creating debug rootfs...")
+ bb.utils.mkdirhier(self.image_rootfs)
+
+ bb.note(" Copying back package database...")
+ for dir in dirs:
+ if not os.path.isdir(self.image_rootfs + '-orig' + dir):
+ continue
+ bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
+ shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir, symlinks=True)
+
+ cpath = oe.cachedpath.CachedPath()
+ # Copy files located in /usr/lib/debug or /usr/src/debug
+ for dir in ["/usr/lib/debug", "/usr/src/debug"]:
+ src = self.image_rootfs + '-orig' + dir
+ if cpath.exists(src):
+ dst = self.image_rootfs + dir
+ bb.utils.mkdirhier(os.path.dirname(dst))
+ shutil.copytree(src, dst)
+
+ # Copy files with suffix '.debug' or located in '.debug' dir.
+ for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'):
+ relative_dir = root[len(self.image_rootfs + '-orig'):]
+ for f in files:
+ if f.endswith('.debug') or '/.debug' in relative_dir:
+ bb.utils.mkdirhier(self.image_rootfs + relative_dir)
+ shutil.copy(os.path.join(root, f),
+ self.image_rootfs + relative_dir)
+
+ bb.note(" Install complementary '*-dbg' packages...")
+ self.pm.install_complementary('*-dbg')
+
+ bb.note(" Rename debug rootfs...")
+ try:
+ shutil.rmtree(self.image_rootfs + '-dbg')
+        except OSError:
+ pass
+ os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+
+        bb.note(" Restoring original rootfs...")
+ os.rename(self.image_rootfs + '-orig', self.image_rootfs)
+
+ def _exec_shell_cmd(self, cmd):
+ fakerootcmd = self.d.getVar('FAKEROOT')
+ if fakerootcmd is not None:
+ exec_cmd = [fakerootcmd, cmd]
+ else:
+ exec_cmd = cmd
+
+ try:
+ subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ return("Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+ return None
+
+ def create(self):
+ bb.note("###### Generate rootfs #######")
+ pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
+ post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
+ rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
+
+ bb.utils.mkdirhier(self.image_rootfs)
+
+ bb.utils.mkdirhier(self.deploydir)
+
+ execute_pre_post_process(self.d, pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ # call the package manager dependent create method
+ self._create()
+
+ sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
+ bb.utils.mkdirhier(sysconfdir)
+ with open(sysconfdir + "/version", "w+") as ver:
+ ver.write(self.d.getVar('BUILDNAME') + "\n")
+
+ execute_pre_post_process(self.d, rootfs_post_install_cmds)
+
+ self.pm.run_intercepts()
+
+ execute_pre_post_process(self.d, post_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d):
+ delayed_postinsts = self._get_delayed_postinsts()
+ if delayed_postinsts is not None:
+ bb.fatal("The following packages could not be configured "
+ "offline and rootfs is read-only: %s" %
+ delayed_postinsts)
+
+ if self.d.getVar('USE_DEVFS') != "1":
+ self._create_devfs()
+
+ self._uninstall_unneeded()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._insert_feed_uris()
+
+ self._run_ldconfig()
+
+ if self.d.getVar('USE_DEPMOD') != "0":
+ self._generate_kernel_module_deps()
+
+ self._cleanup()
+ self._log_check()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+
+ def _uninstall_unneeded(self):
+ # Remove unneeded init script symlinks
+ delayed_postinsts = self._get_delayed_postinsts()
+ if delayed_postinsts is None:
+ if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
+ self._exec_shell_cmd(["update-rc.d", "-f", "-r",
+ self.d.getVar('IMAGE_ROOTFS'),
+ "run-postinsts", "remove"])
+
+ image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+ True, False, self.d)
+ image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')
+
+ if image_rorfs or image_rorfs_force == "1":
+ # Remove components that we don't need if it's a read-only rootfs
+ unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
+ pkgs_installed = image_list_installed_packages(self.d)
+ # Make sure update-alternatives is removed last. This is
+            # because its database has to be available while uninstalling
+ # other packages, allowing alternative symlinks of packages
+ # to be uninstalled or to be managed correctly otherwise.
+ provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
+ pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)
+
+ # update-alternatives provider is removed in its own remove()
+            # call because not all package managers guarantee that packages
+            # are removed in the order they are given in the list (which is
+ # passed to the command line). The sorting done earlier is
+ # utilized to implement the 2-stage removal.
+ if len(pkgs_to_remove) > 1:
+ self.pm.remove(pkgs_to_remove[:-1], False)
+ if len(pkgs_to_remove) > 0:
+ self.pm.remove([pkgs_to_remove[-1]], False)
+
+ if delayed_postinsts:
+ self._save_postinsts()
+ if image_rorfs:
+ bb.warn("There are post install scripts "
+ "in a read-only rootfs")
+
+ post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
+ execute_pre_post_process(self.d, post_uninstall_cmds)
+
+ runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
+ True, False, self.d)
+ if not runtime_pkgmanage:
+ # Remove the package manager data files
+ self.pm.remove_packaging_data()
+
+ def _run_ldconfig(self):
+ if self.d.getVar('LDCONFIGDEPEND'):
+            bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v")
+ self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
+ 'new', '-v'])
+
+ def _check_for_kernel_modules(self, modules_dir):
+ for root, dirs, files in os.walk(modules_dir, topdown=True):
+ for name in files:
+ found_ko = name.endswith(".ko")
+ if found_ko:
+ return found_ko
+ return False
+
+ def _generate_kernel_module_deps(self):
+ modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules')
+ # if we don't have any modules don't bother to do the depmod
+ if not self._check_for_kernel_modules(modules_dir):
+ bb.note("No Kernel Modules found, not running depmod")
+ return
+
+ kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
+ 'kernel-abiversion')
+ if not os.path.exists(kernel_abi_ver_file):
+ bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
+
+        with open(kernel_abi_ver_file) as f:
+            kernel_ver = f.read().strip(' \n')
+        # modules_dir already includes self.image_rootfs, so join from there
+        versioned_modules_dir = os.path.join(modules_dir, kernel_ver)
+
+ bb.utils.mkdirhier(versioned_modules_dir)
+
+ self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
+
+ """
+ Create devfs:
+       * IMAGE_DEVICE_TABLE is the old name for an absolute path to a device table file
+       * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, searched
+ for in the BBPATH
+ If neither are specified then the default name of files/device_table-minimal.txt
+ is searched for in the BBPATH (same as the old version.)
+ """
+ def _create_devfs(self):
+ devtable_list = []
+ devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
+ if devtable is not None:
+ devtable_list.append(devtable)
+ else:
+ devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
+ if devtables is None:
+ devtables = 'files/device_table-minimal.txt'
+ for devtable in devtables.split():
+                devtable_list.append(bb.utils.which(self.d.getVar('BBPATH'), devtable))
+
+ for devtable in devtable_list:
+ self._exec_shell_cmd(["makedevs", "-r",
+ self.image_rootfs, "-D", devtable])
+
+
+class RpmRootfs(Rootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
+        self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\
+                               r'|exit 1|ERROR: |Error: |Error |ERROR '\
+                               r'|Failed |Failed: |Failed$|Failed\(\d+\):)'
+ self.manifest = RpmManifest(d, manifest_dir)
+
+ self.pm = RpmPM(d,
+ d.getVar('IMAGE_ROOTFS'),
+ self.d.getVar('TARGET_VENDOR')
+ )
+
+ self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
+ if self.inc_rpm_image_gen != "1":
+ bb.utils.remove(self.image_rootfs, True)
+ else:
+ self.pm.recovery_packaging_data()
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+
+ self.pm.create_configs()
+
+ '''
+    When rpm incremental image generation is enabled, remove the unneeded
+    packages by comparing the new install solution manifest and the old
+    installed manifest.
+ '''
+ def _create_incremental(self, pkgs_initial_install):
+ if self.inc_rpm_image_gen == "1":
+
+ pkgs_to_install = list()
+ for pkg_type in pkgs_initial_install:
+ pkgs_to_install += pkgs_initial_install[pkg_type]
+
+ installed_manifest = self.pm.load_old_install_solution()
+ solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
+
+ pkg_to_remove = list()
+ for pkg in installed_manifest:
+ if pkg not in solution_manifest:
+ pkg_to_remove.append(pkg)
+
+ self.pm.update()
+
+            bb.note('incremental update -- upgrade packages in place')
+ self.pm.upgrade()
+ if pkg_to_remove != []:
+ bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ self.pm.autoremove()
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
+ rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, rpm_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_rpm_image_gen == "1":
+ self._create_incremental(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install(pkgs_attempt, True)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
+
+ execute_pre_post_process(self.d, rpm_post_process_cmds)
+
+ if self.inc_rpm_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
+ 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
+
+ def _get_delayed_postinsts(self):
+ postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
+ if os.path.isdir(postinst_dir):
+ files = os.listdir(postinst_dir)
+ for f in files:
+ bb.note('Delayed package scriptlet: %s' % f)
+ return files
+
+ return None
+
+ def _save_postinsts(self):
+ # this is just a stub. For RPM, the failed postinstalls are
+ # already saved in /etc/rpm-postinsts
+ pass
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ self.pm._invoke_dnf(["clean", "all"])
+
+
+class DpkgOpkgRootfs(Rootfs):
+ def __init__(self, d, progress_reporter=None, logcatcher=None):
+ super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+
+ def _get_pkgs_postinsts(self, status_file):
+ def _get_pkg_depends_list(pkg_depends):
+ pkg_depends_list = []
+ # filter version requirements like libc (>= 1.1)
+ for dep in pkg_depends.split(', '):
+            m_dep = re.match(r"^(.*) \(.*\)$", dep)
+ if m_dep:
+ dep = m_dep.group(1)
+ pkg_depends_list.append(dep)
+
+ return pkg_depends_list
+
+ pkgs = {}
+ pkg_name = ""
+ pkg_status_match = False
+ pkg_depends = ""
+
+ with open(status_file) as status:
+ data = status.read()
+ for line in data.split('\n'):
+ m_pkg = re.match("^Package: (.*)", line)
+ m_status = re.match("^Status:.*unpacked", line)
+ m_depends = re.match("^Depends: (.*)", line)
+
+ if m_pkg is not None:
+ if pkg_name and pkg_status_match:
+ pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+
+ pkg_name = m_pkg.group(1)
+ pkg_status_match = False
+ pkg_depends = ""
+ elif m_status is not None:
+ pkg_status_match = True
+ elif m_depends is not None:
+ pkg_depends = m_depends.group(1)
+
+ # remove package dependencies not in postinsts
+ pkg_names = list(pkgs.keys())
+ for pkg_name in pkg_names:
+ deps = pkgs[pkg_name][:]
+
+ for d in deps:
+ if d not in pkg_names:
+ pkgs[pkg_name].remove(d)
+
+ return pkgs
+
+ def _get_delayed_postinsts_common(self, status_file):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise RuntimeError("Packages %s and %s have " \
+ "a circular dependency in postinsts scripts." \
+ % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+
+ resolved.append(node)
+
+ pkg_list = []
+
+ pkgs = None
+ if not self.d.getVar('PACKAGE_INSTALL').strip():
+ bb.note("Building empty image")
+ else:
+ pkgs = self._get_pkgs_postinsts(status_file)
+ if pkgs:
+ root = "__packagegroup_postinst__"
+ pkgs[root] = list(pkgs.keys())
+ _dep_resolve(pkgs, root, pkg_list, [])
+ pkg_list.remove(root)
+
+ if len(pkg_list) == 0:
+ return None
+
+ return pkg_list
+
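+    # Illustrative walk-through of _dep_resolve() above with hypothetical
+    # packages: given graph = {'__packagegroup_postinst__': ['a', 'b'],
+    # 'a': ['b'], 'b': []}, the resolved order is ['b', 'a'], i.e.
+    # dependencies always precede their dependents.
+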
+ def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+ num = 0
+ for p in self._get_delayed_postinsts():
+ bb.utils.mkdirhier(dst_postinst_dir)
+
+ if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+ shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+ os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+ num += 1
+
+class DpkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '^E:'
+ self.log_check_expected_regexes = \
+ [
+ "^E: Unmet dependencies."
+ ]
+
+ bb.utils.remove(self.image_rootfs, True)
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+ self.manifest = DpkgManifest(d, manifest_dir)
+ self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'))
+
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
+ deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
+
+ alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
+ bb.utils.mkdirhier(alt_dir)
+
+ # update PM index files
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, deb_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+ # Don't support incremental, so skip that
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ if self.progress_reporter:
+ # Don't support attemptonly, so skip that
+ self.progress_reporter.next_stage()
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self._setup_dbg_rootfs(['/var/lib/dpkg'])
+
+ self.pm.fix_broken_dependencies()
+
+ self.pm.mark_packages("installed")
+
+ self.pm.run_pre_post_installs()
+
+ execute_pre_post_process(self.d, deb_post_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+ return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = self.image_rootfs + "/var/lib/dpkg/status"
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ pass
+
+
+class OpkgRootfs(DpkgOpkgRootfs):
+ def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+ super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
+ self.log_check_regex = '(exit 1|Collected errors)'
+
+ self.manifest = OpkgManifest(d, manifest_dir)
+ self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
+
+ self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
+ if self._remove_old_rootfs():
+ bb.utils.remove(self.image_rootfs, True)
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ else:
+ self.pm = OpkgPM(d,
+ self.image_rootfs,
+ self.opkg_conf,
+ self.pkg_archs)
+ self.pm.recover_packaging_data()
+
+ bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
+
+ def _prelink_file(self, root_dir, filename):
+ bb.note('prelink %s in %s' % (filename, root_dir))
+ prelink_cfg = oe.path.join(root_dir,
+ self.d.expand('${sysconfdir}/prelink.conf'))
+ if not os.path.exists(prelink_cfg):
+ shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
+ prelink_cfg)
+
+ cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
+ self._exec_shell_cmd([cmd_prelink,
+ '--root',
+ root_dir,
+ '-amR',
+ '-N',
+ '-c',
+ self.d.expand('${sysconfdir}/prelink.conf')])
+
+ '''
+ Compare two files with the same key twice to see if they are equal.
+ If they are not equal, it means they are duplicated and come from
+ different packages.
+    1st: Compare them directly;
+    2nd: When incremental image creation is enabled, one of the
+         files could have been prelinked during the previous image
+         creation and changed since, so we need to prelink the other
+         one and compare them again.
+ '''
+ def _file_equal(self, key, f1, f2):
+
+ # Both of them are not prelinked
+ if filecmp.cmp(f1, f2):
+ return True
+
+ if self.image_rootfs not in f1:
+ self._prelink_file(f1.replace(key, ''), f1)
+
+ if self.image_rootfs not in f2:
+ self._prelink_file(f2.replace(key, ''), f2)
+
+ # Both of them are prelinked
+ if filecmp.cmp(f1, f2):
+ return True
+
+ # Not equal
+ return False
+
+ """
+ This function was reused from the old implementation.
+ See commit: "image.bbclass: Added variables for multilib support." by
+ Lianhao Lu.
+ """
+ def _multilib_sanity_test(self, dirs):
+
+ allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
+ if allow_replace is None:
+ allow_replace = ""
+
+        allow_rep = re.compile(re.sub(r"\|$", "", allow_replace))
+ error_prompt = "Multilib check error:"
+
+ files = {}
+ for dir in dirs:
+ for root, subfolders, subfiles in os.walk(dir):
+ for file in subfiles:
+ item = os.path.join(root, file)
+ key = str(os.path.join("/", os.path.relpath(item, dir)))
+
+ valid = True
+ if key in files:
+                        # check whether the file is allowed to be replaced
+ if allow_rep.match(key):
+ valid = True
+ else:
+ if os.path.exists(files[key]) and \
+ os.path.exists(item) and \
+ not self._file_equal(key, files[key], item):
+ valid = False
+                            bb.fatal("%s duplicate files %s and %s are not the same\n" %
+                                (error_prompt, item, files[key]))
+
+                    # passed the check, add to the list
+ if valid:
+ files[key] = item
+
+ def _multilib_test_install(self, pkgs):
+ ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
+ bb.utils.mkdirhier(ml_temp)
+
+ dirs = [self.image_rootfs]
+
+ for variant in self.d.getVar("MULTILIB_VARIANTS").split():
+ ml_target_rootfs = os.path.join(ml_temp, variant)
+
+ bb.utils.remove(ml_target_rootfs, True)
+
+ ml_opkg_conf = os.path.join(ml_temp,
+ variant + "-" + os.path.basename(self.opkg_conf))
+
+ ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
+
+ ml_pm.update()
+ ml_pm.install(pkgs)
+
+ dirs.append(ml_target_rootfs)
+
+ self._multilib_sanity_test(dirs)
+
+ '''
+    When ipk incremental image generation is enabled, remove the unneeded
+    packages by comparing the old full manifest from the previous image
+    and the new full manifest for the current image.
+ '''
+ def _remove_extra_packages(self, pkgs_initial_install):
+ if self.inc_opkg_image_gen == "1":
+ # Parse full manifest in previous existing image creation session
+ old_full_manifest = self.manifest.parse_full_manifest()
+
+ # Create full manifest for the current image session, the old one
+ # will be replaced by the new one.
+ self.manifest.create_full(self.pm)
+
+ # Parse full manifest in current image creation session
+ new_full_manifest = self.manifest.parse_full_manifest()
+
+ pkg_to_remove = list()
+ for pkg in old_full_manifest:
+ if pkg not in new_full_manifest:
+ pkg_to_remove.append(pkg)
+
+ if pkg_to_remove != []:
+                bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+ self.pm.remove(pkg_to_remove)
+
+ '''
+    Compare with the previous image creation; if certain conditions are
+    triggered, the previous old rootfs should be removed. The conditions
+    are that any of PACKAGE_EXCLUDE, NO_RECOMMENDATIONS and
+    BAD_RECOMMENDATIONS has been changed.
+ '''
+ def _remove_old_rootfs(self):
+ if self.inc_opkg_image_gen != "1":
+ return True
+
+ vars_list_file = self.d.expand('${T}/vars_list')
+
+ old_vars_list = ""
+ if os.path.exists(vars_list_file):
+            with open(vars_list_file, 'r') as f:
+                old_vars_list = f.read()
+
+ new_vars_list = '%s:%s:%s\n' % \
+ ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
+ (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
+        with open(vars_list_file, 'w') as f:
+            f.write(new_vars_list)
+
+ if old_vars_list != new_vars_list:
+ return True
+
+ return False
+
+ def _create(self):
+ pkgs_to_install = self.manifest.parse_initial_manifest()
+ opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
+ opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
+
+ # update PM index files, unless users provide their own feeds
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
+ self.pm.write_index()
+
+ execute_pre_post_process(self.d, opkg_pre_process_cmds)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+            # Steps run in a slightly different order here, so skip the next stage
+ self.progress_reporter.next_stage()
+
+ self.pm.update()
+
+ self.pm.handle_bad_recommendations()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ if self.inc_opkg_image_gen == "1":
+ self._remove_extra_packages(pkgs_to_install)
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ # For multilib, we perform a sanity test before final install
+ # If sanity test fails, it will automatically do a bb.fatal()
+ # and the installation will stop
+ if pkg_type == Manifest.PKG_TYPE_MULTILIB:
+ self._multilib_test_install(pkgs_to_install[pkg_type])
+
+ self.pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ self.pm.install_complementary()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
+ opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
+ self._setup_dbg_rootfs([opkg_dir])
+
+ execute_pre_post_process(self.d, opkg_post_process_cmds)
+
+ if self.inc_opkg_image_gen == "1":
+ self.pm.backup_packaging_data()
+
+ if self.progress_reporter:
+ self.progress_reporter.next_stage()
+
+ @staticmethod
+ def _depends_list():
+        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS']
+
+ def _get_delayed_postinsts(self):
+ status_file = os.path.join(self.image_rootfs,
+ self.d.getVar('OPKGLIBDIR').strip('/'),
+ "opkg", "status")
+ return self._get_delayed_postinsts_common(status_file)
+
+ def _save_postinsts(self):
+ dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
+ src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
+ return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+ def _log_check(self):
+ self._log_check_warn()
+ self._log_check_error()
+
+ def _cleanup(self):
+ self.pm.remove_lists()
+
+def get_class_for_type(imgtype):
+ return {"rpm": RpmRootfs,
+ "ipk": OpkgRootfs,
+ "deb": DpkgRootfs}[imgtype]
+
+def variable_depends(d, manifest_dir=None):
+ img_type = d.getVar('IMAGE_PKGTYPE')
+ cls = get_class_for_type(img_type)
+ return cls._depends_list()
+
+def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
+ env_bkp = os.environ.copy()
+
+ img_type = d.getVar('IMAGE_PKGTYPE')
+ if img_type == "rpm":
+ RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
+ elif img_type == "ipk":
+ OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
+ elif img_type == "deb":
+ DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
+
+ os.environ.clear()
+ os.environ.update(env_bkp)
+
+
+def image_list_installed_packages(d, rootfs_dir=None):
+ if not rootfs_dir:
+ rootfs_dir = d.getVar('IMAGE_ROOTFS')
+
+ img_type = d.getVar('IMAGE_PKGTYPE')
+ if img_type == "rpm":
+ return RpmPkgsList(d, rootfs_dir).list_pkgs()
+ elif img_type == "ipk":
+ return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
+ elif img_type == "deb":
+ return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+
+if __name__ == "__main__":
+ """
+ We should be able to run this as a standalone script, from outside bitbake
+ environment.
+ """
+ """
+ TBD
+ """
diff --git a/poky/meta/lib/oe/sdk.py b/poky/meta/lib/oe/sdk.py
new file mode 100644
index 000000000..d6a503372
--- /dev/null
+++ b/poky/meta/lib/oe/sdk.py
@@ -0,0 +1,473 @@
+from abc import ABCMeta, abstractmethod
+from oe.utils import execute_pre_post_process
+from oe.manifest import *
+from oe.package_manager import *
+import os
+import shutil
+import glob
+import subprocess
+import traceback
+import oe.path
+
+def generate_locale_archive(d, rootfs):
+ # Pretty sure we don't need this for SDK archive generation but
+ # keeping it to be safe...
+ target_arch = d.getVar('SDK_ARCH')
+    locale_arch_options = {
+ "arm": ["--uint32-align=4", "--little-endian"],
+ "armeb": ["--uint32-align=4", "--big-endian"],
+ "aarch64": ["--uint32-align=4", "--little-endian"],
+ "aarch64_be": ["--uint32-align=4", "--big-endian"],
+ "sh4": ["--uint32-align=4", "--big-endian"],
+ "powerpc": ["--uint32-align=4", "--big-endian"],
+ "powerpc64": ["--uint32-align=4", "--big-endian"],
+ "mips": ["--uint32-align=4", "--big-endian"],
+ "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
+ "mips64": ["--uint32-align=4", "--big-endian"],
+ "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
+ "mipsel": ["--uint32-align=4", "--little-endian"],
+ "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
+ "mips64el": ["--uint32-align=4", "--little-endian"],
+ "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
+ "i586": ["--uint32-align=4", "--little-endian"],
+ "i686": ["--uint32-align=4", "--little-endian"],
+ "x86_64": ["--uint32-align=4", "--little-endian"]
+ }
+ if target_arch in locale_arch_options:
+ arch_options = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
+
+ localedir = oe.path.join(rootfs, d.getVar("libdir_nativesdk"), "locale")
+ # Need to set this so cross-localedef knows where the archive is
+ env = dict(os.environ)
+ env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
+
+ for name in os.listdir(localedir):
+ path = os.path.join(localedir, name)
+ if os.path.isdir(path):
+ try:
+ cmd = ["cross-localedef", "--verbose"]
+ cmd += arch_options
+ cmd += ["--add-to-archive", path]
+ subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
+            except subprocess.CalledProcessError as e:
+                bb.fatal("Cannot create locale archive: %s" % e.output)
+
+class Sdk(object, metaclass=ABCMeta):
+ def __init__(self, d, manifest_dir):
+ self.d = d
+ self.sdk_output = self.d.getVar('SDK_OUTPUT')
+ self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
+ self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
+ self.sysconfdir = self.d.getVar('sysconfdir').strip('/')
+
+ self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
+ self.sdk_host_sysroot = self.sdk_output
+
+ if manifest_dir is None:
+ self.manifest_dir = self.d.getVar("SDK_DIR")
+ else:
+ self.manifest_dir = manifest_dir
+
+ self.remove(self.sdk_output, True)
+
+ self.install_order = Manifest.INSTALL_ORDER
+
+ @abstractmethod
+ def _populate(self):
+ pass
+
+ def populate(self):
+ self.mkdirhier(self.sdk_output)
+
+ # call backend dependent implementation
+ self._populate()
+
+ # Don't ship any libGL in the SDK
+ self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('libdir_nativesdk').strip('/'),
+ "libGL*"))
+
+ # Fix or remove broken .la files
+ self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('libdir_nativesdk').strip('/'),
+ "*.la"))
+
+ # Link the ld.so.cache file into the hosts filesystem
+ link_name = os.path.join(self.sdk_output, self.sdk_native_path,
+ self.sysconfdir, "ld.so.cache")
+ self.mkdirhier(os.path.dirname(link_name))
+ os.symlink("/etc/ld.so.cache", link_name)
+
+ execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))
+
+ def movefile(self, sourcefile, destdir):
+ try:
+ # FIXME: this check of movefile's return code to None should be
+ # fixed within the function to use only exceptions to signal when
+ # something goes wrong
+            if bb.utils.movefile(sourcefile, destdir) is None:
+                raise OSError("moving %s to %s failed"
+                              % (sourcefile, destdir))
+ #FIXME: using umbrella exc catching because bb.utils method raises it
+ except Exception as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.error("unable to place %s in final SDK location" % sourcefile)
+
+ def mkdirhier(self, dirpath):
+ try:
+ bb.utils.mkdirhier(dirpath)
+ except OSError as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.fatal("cannot make dir for SDK: %s" % dirpath)
+
+ def remove(self, path, recurse=False):
+ try:
+ bb.utils.remove(path, recurse)
+ #FIXME: using umbrella exc catching because bb.utils method raises it
+ except Exception as e:
+ bb.debug(1, "printing the stack trace\n %s" %traceback.format_exc())
+ bb.warn("cannot remove SDK dir: %s" % path)
+
+ def install_locales(self, pm):
+ # This is only relevant for glibc
+ if self.d.getVar("TCLIBC") != "glibc":
+ return
+
+ linguas = self.d.getVar("SDKIMAGE_LINGUAS")
+ if linguas:
+ import fnmatch
+ # Install the binary locales
+ if linguas == "all":
+ pm.install_glob("nativesdk-glibc-binary-localedata-*.utf-8", sdk=True)
+ else:
+ for lang in linguas.split():
+ pm.install("nativesdk-glibc-binary-localedata-%s.utf-8" % lang)
+ # Generate a locale archive of them
+ generate_locale_archive(self.d, oe.path.join(self.sdk_host_sysroot, self.sdk_native_path))
+ # And now delete the binary locales
+ pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
+ pm.remove(pkgs)
+ else:
+ # No linguas so do nothing
+ pass
+
+
+class RpmSdk(Sdk):
+ def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
+ super(RpmSdk, self).__init__(d, manifest_dir)
+
+ self.target_manifest = RpmManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = RpmManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ rpm_repo_workdir = "oe-sdk-repo"
+ if "sdk_ext" in d.getVar("BB_RUNTASK"):
+ rpm_repo_workdir = "oe-sdk-ext-repo"
+
+ self.target_pm = RpmPM(d,
+ self.sdk_target_sysroot,
+ self.d.getVar('TARGET_VENDOR'),
+ 'target',
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ self.host_pm = RpmPM(d,
+ self.sdk_host_sysroot,
+ self.d.getVar('SDK_VENDOR'),
+ 'host',
+ "SDK_PACKAGE_ARCHS",
+ "SDK_OS",
+ rpm_repo_workdir=rpm_repo_workdir
+ )
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.create_configs()
+ pm.write_index()
+ pm.update()
+
+ pkgs = []
+ pkgs_attempt = []
+ for pkg_type in pkgs_to_install:
+ if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+ pkgs_attempt += pkgs_to_install[pkg_type]
+ else:
+ pkgs += pkgs_to_install[pkg_type]
+
+ pm.install(pkgs)
+
+ pm.install(pkgs_attempt, True)
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts()
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts()
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ # Move host RPM library data
+ native_rpm_state_dir = os.path.join(self.sdk_output,
+ self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib",
+ "rpm"
+ )
+ self.mkdirhier(native_rpm_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output,
+ "var",
+ "lib",
+ "rpm",
+ "*")):
+ self.movefile(f, native_rpm_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+ # Move host sysconfig data
+        native_sysconf_dir = os.path.join(self.sdk_output,
+                                          self.sdk_native_path,
+                                          self.d.getVar('sysconfdir').strip('/'))
+ self.mkdirhier(native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
+ self.movefile(f, native_sysconf_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
+ self.movefile(f, native_sysconf_dir)
+ self.remove(os.path.join(self.sdk_output, "etc"), True)
+
+
+class OpkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(OpkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_conf = self.d.getVar("IPKGCONF_TARGET")
+ self.host_conf = self.d.getVar("IPKGCONF_SDK")
+
+ self.target_manifest = OpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = OpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
+ self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
+
+ self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
+ self.d.getVar("SDK_PACKAGE_ARCHS"))
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
+ pm.write_index()
+
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts()
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts()
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
+ host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
+
+ self.mkdirhier(target_sysconfdir)
+ shutil.copy(self.target_conf, target_sysconfdir)
+ os.chmod(os.path.join(target_sysconfdir,
+ os.path.basename(self.target_conf)), 0o644)
+
+ self.mkdirhier(host_sysconfdir)
+ shutil.copy(self.host_conf, host_sysconfdir)
+ os.chmod(os.path.join(host_sysconfdir,
+ os.path.basename(self.host_conf)), 0o644)
+
+ native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ self.d.getVar('localstatedir_nativesdk').strip('/'),
+ "lib", "opkg")
+ self.mkdirhier(native_opkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
+ self.movefile(f, native_opkg_state_dir)
+
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+
+class DpkgSdk(Sdk):
+ def __init__(self, d, manifest_dir=None):
+ super(DpkgSdk, self).__init__(d, manifest_dir)
+
+ self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
+ self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
+
+ self.target_manifest = DpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_TARGET)
+ self.host_manifest = DpkgManifest(d, self.manifest_dir,
+ Manifest.MANIFEST_TYPE_SDK_HOST)
+
+ self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
+ self.d.getVar("PACKAGE_ARCHS"),
+ self.d.getVar("DPKG_ARCH"),
+ self.target_conf_dir)
+
+ self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
+ self.d.getVar("SDK_PACKAGE_ARCHS"),
+ self.d.getVar("DEB_SDK_ARCH"),
+ self.host_conf_dir)
+
+ def _copy_apt_dir_to(self, dst_dir):
+ staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
+
+ self.remove(dst_dir, True)
+
+ shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
+
+ def _populate_sysroot(self, pm, manifest):
+ pkgs_to_install = manifest.parse_initial_manifest()
+
+ pm.write_index()
+ pm.update()
+
+ for pkg_type in self.install_order:
+ if pkg_type in pkgs_to_install:
+ pm.install(pkgs_to_install[pkg_type],
+ [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY])
+
+ def _populate(self):
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND"))
+
+ bb.note("Installing TARGET packages")
+ self._populate_sysroot(self.target_pm, self.target_manifest)
+
+ self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
+
+ self.target_pm.run_intercepts()
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.target_pm.remove_packaging_data()
+
+ bb.note("Installing NATIVESDK packages")
+ self._populate_sysroot(self.host_pm, self.host_manifest)
+ self.install_locales(self.host_pm)
+
+ self.host_pm.run_intercepts()
+
+ execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
+
+ self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
+ "etc", "apt"))
+
+ if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
+ self.host_pm.remove_packaging_data()
+
+ native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+ "var", "lib", "dpkg")
+ self.mkdirhier(native_dpkg_state_dir)
+ for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
+ self.movefile(f, native_dpkg_state_dir)
+ self.remove(os.path.join(self.sdk_output, "var"), True)
+
+
+def sdk_list_installed_packages(d, target, rootfs_dir=None):
+ if rootfs_dir is None:
+ sdk_output = d.getVar('SDK_OUTPUT')
+ target_path = d.getVar('SDKTARGETSYSROOT').strip('/')
+
+ rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
+
+ img_type = d.getVar('IMAGE_PKGTYPE')
+ if img_type == "rpm":
+        return RpmPkgsList(d, rootfs_dir).list_pkgs()
+ elif img_type == "ipk":
+ conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
+ return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
+ elif img_type == "deb":
+ return DpkgPkgsList(d, rootfs_dir).list_pkgs()
+
+def populate_sdk(d, manifest_dir=None):
+ env_bkp = os.environ.copy()
+
+ img_type = d.getVar('IMAGE_PKGTYPE')
+ if img_type == "rpm":
+ RpmSdk(d, manifest_dir).populate()
+ elif img_type == "ipk":
+ OpkgSdk(d, manifest_dir).populate()
+ elif img_type == "deb":
+ DpkgSdk(d, manifest_dir).populate()
+
+ os.environ.clear()
+ os.environ.update(env_bkp)
+
+def get_extra_sdkinfo(sstate_dir):
+ """
+    Collect extra information from the sstate cache, used when generating
+    the target and host manifest files of the eSDK.
+ """
+ import math
+
+ extra_info = {}
+ extra_info['tasksizes'] = {}
+ extra_info['filesizes'] = {}
+ for root, _, files in os.walk(sstate_dir):
+ for fn in files:
+ if fn.endswith('.tgz'):
+ fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
+ task = fn.rsplit(':',1)[1].split('_',1)[1].split(',')[0]
+ origtotal = extra_info['tasksizes'].get(task, 0)
+ extra_info['tasksizes'][task] = origtotal + fsize
+ extra_info['filesizes'][fn] = fsize
+ return extra_info
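+
+# Illustrative sketch: sizes are recorded in KiB, rounded up, so a 2500-byte
+# sstate .tgz contributes 3 both to its task's total in 'tasksizes' and to
+# its own entry in 'filesizes'.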
+
+if __name__ == "__main__":
+ pass
diff --git a/poky/meta/lib/oe/sstatesig.py b/poky/meta/lib/oe/sstatesig.py
new file mode 100644
index 000000000..b82e0f422
--- /dev/null
+++ b/poky/meta/lib/oe/sstatesig.py
@@ -0,0 +1,404 @@
+import bb.siggen
+import oe
+import os
+
+def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
+ # Return True if we should keep the dependency, False to drop it
+ def isNative(x):
+ return x.endswith("-native")
+ def isCross(x):
+ return "-cross-" in x
+ def isNativeSDK(x):
+ return x.startswith("nativesdk-")
+ def isKernel(fn):
+ inherits = " ".join(dataCache.inherits[fn])
+ return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
+ def isPackageGroup(fn):
+ inherits = " ".join(dataCache.inherits[fn])
+ return "/packagegroup.bbclass" in inherits
+ def isAllArch(fn):
+ inherits = " ".join(dataCache.inherits[fn])
+ return "/allarch.bbclass" in inherits
+ def isImage(fn):
+ return "/image.bbclass" in " ".join(dataCache.inherits[fn])
+
+ # (Almost) always include our own inter-task dependencies.
+ # The exception is the special do_kernel_configme->do_unpack_and_patch
+ # dependency from archiver.bbclass.
+ if recipename == depname:
+ if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"):
+ return False
+ return True
+
+ # Exclude well defined recipe->dependency
+ if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
+ return False
+
+ # Check for special wildcard
+ if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
+ return False
+
+ # Don't change native/cross/nativesdk recipe dependencies any further
+ if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
+ return True
+
+ # Only target packages beyond here
+
+    # allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
+ if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
+ return False
+
+ # Exclude well defined machine specific configurations which don't change ABI
+ if depname in siggen.abisaferecipes and not isImage(fn):
+ return False
+
+ # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
+ # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
+ # is machine specific.
+    # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
+    # and we recommend a kernel-module, we exclude the dependency.
+ depfn = dep.rsplit(".", 1)[0]
+ if dataCache and isKernel(depfn) and not isKernel(fn):
+ for pkg in dataCache.runrecs[fn]:
+ if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
+ return False
+
+ # Default to keep dependencies
+ return True
+
+def sstate_lockedsigs(d):
+ sigs = {}
+ types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split()
+ for t in types:
+ siggen_lockedsigs_var = "SIGGEN_LOCKEDSIGS_%s" % t
+ lockedsigs = (d.getVar(siggen_lockedsigs_var) or "").split()
+ for ls in lockedsigs:
+ pn, task, h = ls.split(":", 2)
+ if pn not in sigs:
+ sigs[pn] = {}
+ sigs[pn][task] = [h, siggen_lockedsigs_var]
+ return sigs
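+
+# Illustrative sketch: with SIGGEN_LOCKEDSIGS_TYPES = "t-core2-64" and an entry
+# "zlib:do_compile:<hash>" in SIGGEN_LOCKEDSIGS_t-core2-64, this returns
+# sigs['zlib']['do_compile'] == ['<hash>', 'SIGGEN_LOCKEDSIGS_t-core2-64'].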
+
+class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
+ name = "OEBasic"
+ def init_rundepcheck(self, data):
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+
+class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
+ name = "OEBasicHash"
+ def init_rundepcheck(self, data):
+ self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+ self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
+ self.lockedsigs = sstate_lockedsigs(data)
+ self.lockedhashes = {}
+ self.lockedpnmap = {}
+ self.lockedhashfn = {}
+ self.machine = data.getVar("MACHINE")
+ self.mismatch_msgs = []
+ self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
+ "").split()
+ self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
+
+ def tasks_resolved(self, virtmap, virtpnmap, dataCache):
+ # Translate virtual/xxx entries to PN values
+ newabisafe = []
+ for a in self.abisaferecipes:
+ if a in virtpnmap:
+ newabisafe.append(virtpnmap[a])
+ else:
+ newabisafe.append(a)
+ self.abisaferecipes = newabisafe
+ newsafedeps = []
+ for a in self.saferecipedeps:
+ a1, a2 = a.split("->")
+ if a1 in virtpnmap:
+ a1 = virtpnmap[a1]
+ if a2 in virtpnmap:
+ a2 = virtpnmap[a2]
+ newsafedeps.append(a1 + "->" + a2)
+ self.saferecipedeps = newsafedeps
+
+ def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
+ return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+
+ def get_taskdata(self):
+ data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
+ return (data, self.lockedpnmap, self.lockedhashfn)
+
+ def set_taskdata(self, data):
+ coredata, self.lockedpnmap, self.lockedhashfn = data
+ super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+
+ def dump_sigs(self, dataCache, options):
+ sigfile = os.getcwd() + "/locked-sigs.inc"
+ bb.plain("Writing locked sigs to %s" % sigfile)
+ self.dump_lockedsigs(sigfile)
+ return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
+
+ def get_taskhash(self, fn, task, deps, dataCache):
+ h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
+
+ recipename = dataCache.pkg_fn[fn]
+ self.lockedpnmap[fn] = recipename
+ self.lockedhashfn[fn] = dataCache.hashfn[fn]
+
+ unlocked = False
+ if recipename in self.unlockedrecipes:
+ unlocked = True
+ else:
+ def recipename_from_dep(dep):
+ # The dep entry will look something like
+ # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
+ # ...
+ fn = dep.rsplit('.', 1)[0]
+ return dataCache.pkg_fn[fn]
+
+ # If any unlocked recipe is in the direct dependencies then the
+ # current recipe should be unlocked as well.
+ depnames = [ recipename_from_dep(x) for x in deps ]
+ if any(x in y for y in depnames for x in self.unlockedrecipes):
+ self.unlockedrecipes[recipename] = ''
+ unlocked = True
+
+ if not unlocked and recipename in self.lockedsigs:
+ if task in self.lockedsigs[recipename]:
+ k = fn + "." + task
+ h_locked = self.lockedsigs[recipename][task][0]
+ var = self.lockedsigs[recipename][task][1]
+ self.lockedhashes[k] = h_locked
+ self.taskhash[k] = h_locked
+ #bb.warn("Using %s %s %s" % (recipename, task, h))
+
+ if h != h_locked:
+ self.mismatch_msgs.append('The %s:%s sig is computed to be %s, but the sig is locked to %s in %s'
+ % (recipename, task, h, h_locked, var))
+
+ return h_locked
+ #bb.warn("%s %s %s" % (recipename, task, h))
+ return h
+
+ def dump_sigtask(self, fn, task, stampbase, runtime):
+ k = fn + "." + task
+ if k in self.lockedhashes:
+ return
+ super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
+
+ def dump_lockedsigs(self, sigfile, taskfilter=None):
+ types = {}
+ for k in self.runtaskdeps:
+ if taskfilter:
+                if k not in taskfilter:
+ continue
+ fn = k.rsplit(".",1)[0]
+ t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
+ t = 't-' + t.replace('_', '-')
+ if t not in types:
+ types[t] = []
+ types[t].append(k)
+
+ with open(sigfile, "w") as f:
+ l = sorted(types)
+ for t in l:
+ f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
+ types[t].sort()
+ sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
+ for k in sortedk:
+ fn = k.rsplit(".",1)[0]
+ task = k.rsplit(".",1)[1]
+ if k not in self.taskhash:
+ continue
+ f.write(" " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
+ f.write(' "\n')
+ f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
+
+ def dump_siglist(self, sigfile):
+ with open(sigfile, "w") as f:
+ tasks = []
+ for taskitem in self.taskhash:
+ (fn, task) = taskitem.rsplit(".", 1)
+ pn = self.lockedpnmap[fn]
+ tasks.append((pn, task, fn, self.taskhash[taskitem]))
+ for (pn, task, fn, taskhash) in sorted(tasks):
+ f.write('%s.%s %s %s\n' % (pn, task, fn, taskhash))
+
+ def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
+ warn_msgs = []
+ error_msgs = []
+ sstate_missing_msgs = []
+
+ for task in range(len(sq_fn)):
+ if task not in ret:
+ for pn in self.lockedsigs:
+                    if sq_hash[task] in [v[0] for v in self.lockedsigs[pn].values()]:
+ if sq_task[task] == 'do_shared_workdir':
+ continue
+ sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
+ % (pn, sq_task[task], sq_hash[task]))
+
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
+ if checklevel == 'warn':
+ warn_msgs += self.mismatch_msgs
+ elif checklevel == 'error':
+ error_msgs += self.mismatch_msgs
+
+ checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
+ if checklevel == 'warn':
+ warn_msgs += sstate_missing_msgs
+ elif checklevel == 'error':
+ error_msgs += sstate_missing_msgs
+
+ if warn_msgs:
+ bb.warn("\n".join(warn_msgs))
+ if error_msgs:
+ bb.fatal("\n".join(error_msgs))
+
+
+# Insert these classes into siggen's namespace so it can see and select them
+bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
+bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
+
+
+def find_siginfo(pn, taskname, taskhashlist, d):
+ """ Find signature data files for comparison purposes """
+
+ import fnmatch
+ import glob
+
+ if not taskname:
+ # We have to derive pn and taskname
+ key = pn
+ splitit = key.split('.bb.')
+ taskname = splitit[1]
+ pn = os.path.basename(splitit[0]).split('_')[0]
+ if key.startswith('virtual:native:'):
+ pn = pn + '-native'
+
+ hashfiles = {}
+ filedates = {}
+
+ def get_hashval(siginfo):
+ if siginfo.endswith('.siginfo'):
+ return siginfo.rpartition(':')[2].partition('_')[0]
+ else:
+ return siginfo.rpartition('.')[2]
+
+ # First search in stamps dir
+ localdata = d.createCopy()
+ localdata.setVar('MULTIMACH_TARGET_SYS', '*')
+ localdata.setVar('PN', pn)
+ localdata.setVar('PV', '*')
+ localdata.setVar('PR', '*')
+ localdata.setVar('EXTENDPE', '')
+ stamp = localdata.getVar('STAMP')
+ if pn.startswith("gcc-source"):
+ # gcc-source shared workdir is a special case :(
+ stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")
+
+ filespec = '%s.%s.sigdata.*' % (stamp, taskname)
+ foundall = False
+ for fullpath in glob.glob(filespec):
+ match = False
+ if taskhashlist:
+ for taskhash in taskhashlist:
+ if fullpath.endswith('.%s' % taskhash):
+ hashfiles[taskhash] = fullpath
+ if len(hashfiles) == len(taskhashlist):
+ foundall = True
+ break
+ else:
+ try:
+ filedates[fullpath] = os.stat(fullpath).st_mtime
+ except OSError:
+ continue
+ hashval = get_hashval(fullpath)
+ hashfiles[hashval] = fullpath
+
+ if not taskhashlist or (len(filedates) < 2 and not foundall):
+ # That didn't work, look in sstate-cache
+ hashes = taskhashlist or ['?' * 32]
+ localdata = bb.data.createCopy(d)
+ for hashval in hashes:
+ localdata.setVar('PACKAGE_ARCH', '*')
+ localdata.setVar('TARGET_VENDOR', '*')
+ localdata.setVar('TARGET_OS', '*')
+ localdata.setVar('PN', pn)
+ localdata.setVar('PV', '*')
+ localdata.setVar('PR', '*')
+ localdata.setVar('BB_TASKHASH', hashval)
+ swspec = localdata.getVar('SSTATE_SWSPEC')
+ if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
+ localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
+ elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
+ localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
+ sstatename = taskname[3:]
+ filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
+
+ matchedfiles = glob.glob(filespec)
+ for fullpath in matchedfiles:
+ actual_hashval = get_hashval(fullpath)
+ if actual_hashval in hashfiles:
+ continue
+                hashfiles[actual_hashval] = fullpath
+ if not taskhashlist:
+ try:
+ filedates[fullpath] = os.stat(fullpath).st_mtime
+                    except OSError:
+ continue
+
+ if taskhashlist:
+ return hashfiles
+ else:
+ return filedates
+
+bb.siggen.find_siginfo = find_siginfo
+
+
+def sstate_get_manifest_filename(task, d):
+ """
+ Return the sstate manifest file path for a particular task.
+ Also returns the datastore that can be used to query related variables.
+ """
+ d2 = d.createCopy()
+ extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info')
+ if extrainf:
+ d2.setVar("SSTATE_MANMACH", extrainf)
+ return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
+
+def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
+ d2 = d
+ variant = ''
+ if taskdata2.startswith("virtual:multilib"):
+ variant = taskdata2.split(":")[2]
+ if variant not in multilibcache:
+ multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
+ d2 = multilibcache[variant]
+
+ if taskdata.endswith("-native"):
+ pkgarchs = ["${BUILD_ARCH}"]
+ elif taskdata.startswith("nativesdk-"):
+ pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
+ elif "-cross-canadian" in taskdata:
+ pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
+ elif "-cross-" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
+ elif "-crosssdk" in taskdata:
+ pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
+ else:
+ pkgarchs = ['${MACHINE_ARCH}']
+ pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+ pkgarchs.append('allarch')
+ pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+
+ for pkgarch in pkgarchs:
+ manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
+ if os.path.exists(manifest):
+ return manifest, d2
+ bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+ return None, d2
+
+
diff --git a/poky/meta/lib/oe/terminal.py b/poky/meta/lib/oe/terminal.py
new file mode 100644
index 000000000..94afe394e
--- /dev/null
+++ b/poky/meta/lib/oe/terminal.py
@@ -0,0 +1,308 @@
+import logging
+import os
+import oe.classutils
+import shlex
+from bb.process import Popen, ExecutionError
+from distutils.version import LooseVersion
+
+logger = logging.getLogger('BitBake.OE.Terminal')
+
+
+class UnsupportedTerminal(Exception):
+ pass
+
+class NoSupportedTerminals(Exception):
+ def __init__(self, terms):
+ self.terms = terms
+
+
+class Registry(oe.classutils.ClassRegistry):
+ command = None
+
+ def __init__(cls, name, bases, attrs):
+ super(Registry, cls).__init__(name.lower(), bases, attrs)
+
+ @property
+ def implemented(cls):
+ return bool(cls.command)
+
+
+class Terminal(Popen, metaclass=Registry):
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ fmt_sh_cmd = self.format_command(sh_cmd, title)
+ try:
+ Popen.__init__(self, fmt_sh_cmd, env=env)
+ except OSError as exc:
+ import errno
+ if exc.errno == errno.ENOENT:
+ raise UnsupportedTerminal(self.name)
+ else:
+ raise
+
+ def format_command(self, sh_cmd, title):
+ fmt = {'title': title or 'Terminal', 'command': sh_cmd}
+ if isinstance(self.command, str):
+ return shlex.split(self.command.format(**fmt))
+ else:
+ return [element.format(**fmt) for element in self.command]
+
+class XTerminal(Terminal):
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ Terminal.__init__(self, sh_cmd, title, env, d)
+ if not os.environ.get('DISPLAY'):
+ raise UnsupportedTerminal(self.name)
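+
+# Illustrative sketch: a new backend only needs a command template (and
+# optionally a priority); the Registry metaclass registers each concrete
+# subclass automatically. A hypothetical example, not one of the shipped
+# terminals:
+#
+#   class Foot(XTerminal):
+#       command = 'foot --title "{title}" {command}'
+#       priority = 2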
+
+class Gnome(XTerminal):
+ command = 'gnome-terminal -t "{title}" -x {command}'
+ priority = 2
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+        # Recent versions of gnome-terminal do not support non-UTF-8 charsets:
+        # https://bugzilla.gnome.org/show_bug.cgi?id=732127; as a workaround,
+        # clear the LC_ALL environment variable so the terminal uses the
+        # system locale. Once this is fixed in gnome-terminal, the workaround
+        # should be removed.
+        if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
+
+ XTerminal.__init__(self, sh_cmd, title, env, d)
+
+class Mate(XTerminal):
+ command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
+ priority = 2
+
+class Xfce(XTerminal):
+ command = 'xfce4-terminal -T "{title}" -e "{command}"'
+ priority = 2
+
+class Terminology(XTerminal):
+ command = 'terminology -T="{title}" -e {command}'
+ priority = 2
+
+class Konsole(XTerminal):
+ command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
+ priority = 2
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ # Check version
+ vernum = check_terminal_version("konsole")
+ if vernum and LooseVersion(vernum) < '2.0.0':
+ # Konsole from KDE 3.x
+ self.command = 'konsole -T "{title}" -e {command}'
+ elif vernum and LooseVersion(vernum) < '16.08.1':
+            # Konsole versions before 16.08.1 support --nofork
+ self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
+ XTerminal.__init__(self, sh_cmd, title, env, d)
+
+class XTerm(XTerminal):
+ command = 'xterm -T "{title}" -e {command}'
+ priority = 1
+
+class Rxvt(XTerminal):
+ command = 'rxvt -T "{title}" -e {command}'
+ priority = 1
+
+class Screen(Terminal):
+ command = 'screen -D -m -t "{title}" -S devshell {command}'
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ s_id = "devshell_%i" % os.getpid()
+ self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
+ Terminal.__init__(self, sh_cmd, title, env, d)
+ msg = 'Screen started. Please connect in another terminal with ' \
+ '"screen -r %s"' % s_id
+        if d:
+ bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
+ 0.5, 10), d)
+ else:
+ logger.warn(msg)
+
+class TmuxRunning(Terminal):
+ """Open a new pane in the current running tmux window"""
+ name = 'tmux-running'
+ command = 'tmux split-window "{command}"'
+ priority = 2.75
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+ raise UnsupportedTerminal('tmux is not installed')
+
+ if not os.getenv('TMUX'):
+ raise UnsupportedTerminal('tmux is not running')
+
+ if not check_tmux_pane_size('tmux'):
+            raise UnsupportedTerminal('tmux pane too small, or tmux version < 1.9 is being used')
+
+ Terminal.__init__(self, sh_cmd, title, env, d)
+
+class TmuxNewWindow(Terminal):
+ """Open a new window in the current running tmux session"""
+ name = 'tmux-new-window'
+ command = 'tmux new-window -n "{title}" "{command}"'
+ priority = 2.70
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+ raise UnsupportedTerminal('tmux is not installed')
+
+ if not os.getenv('TMUX'):
+ raise UnsupportedTerminal('tmux is not running')
+
+ Terminal.__init__(self, sh_cmd, title, env, d)
+
+class Tmux(Terminal):
+ """Start a new tmux session and window"""
+ command = 'tmux new -d -s devshell -n devshell "{command}"'
+ priority = 0.75
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+ raise UnsupportedTerminal('tmux is not installed')
+
+ # TODO: consider using a 'devshell' session shared amongst all
+ # devshells, if it's already there, add a new window to it.
+ window_name = 'devshell-%i' % os.getpid()
+
+ self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
+ Terminal.__init__(self, sh_cmd, title, env, d)
+
+ attach_cmd = 'tmux att -t {0}'.format(window_name)
+ msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
+ if d:
+ bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
+ else:
+ logger.warn(msg)
+
+class Custom(Terminal):
+ command = 'false' # This is a placeholder
+ priority = 3
+
+ def __init__(self, sh_cmd, title=None, env=None, d=None):
+ self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
+ if self.command:
+            if '{command}' not in self.command:
+ self.command += ' {command}'
+ Terminal.__init__(self, sh_cmd, title, env, d)
+ logger.warn('Custom terminal was started.')
+ else:
+ logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
+ raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
+
+
+def prioritized():
+ return Registry.prioritized()
+
+def get_cmd_list():
+ terms = Registry.prioritized()
+ cmds = []
+ for term in terms:
+ if term.command:
+ cmds.append(term.command)
+ return cmds
+
+def spawn_preferred(sh_cmd, title=None, env=None, d=None):
+ """Spawn the first supported terminal, by priority"""
+ for terminal in prioritized():
+ try:
+ spawn(terminal.name, sh_cmd, title, env, d)
+ break
+ except UnsupportedTerminal:
+ continue
+ else:
+ raise NoSupportedTerminals(get_cmd_list())
+
+def spawn(name, sh_cmd, title=None, env=None, d=None):
+ """Spawn the specified terminal, by name"""
+ logger.debug(1, 'Attempting to spawn terminal "%s"', name)
+ try:
+ terminal = Registry.registry[name]
+ except KeyError:
+ raise UnsupportedTerminal(name)
+
+ # We need to know when the command completes but some terminals (at least
+    # gnome and tmux) give us no way to do this. We therefore write the pid
+ # to a file using a "phonehome" wrapper script, then monitor the pid
+ # until it exits.
+ import tempfile
+ import time
+    pidfile = tempfile.NamedTemporaryFile(delete=False).name
+ try:
+ sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
+ pipe = terminal(sh_cmd, title, env, d)
+ output = pipe.communicate()[0]
+ if output:
+ output = output.decode("utf-8")
+ if pipe.returncode != 0:
+ raise ExecutionError(sh_cmd, pipe.returncode, output)
+
+        while os.stat(pidfile).st_size <= 0:
+            time.sleep(0.01)
+ with open(pidfile, "r") as f:
+ pid = int(f.readline())
+ finally:
+ os.unlink(pidfile)
+
+ while True:
+ try:
+ os.kill(pid, 0)
+ time.sleep(0.1)
+ except OSError:
+ return
+
+def check_tmux_pane_size(tmux):
+ import subprocess as sub
+    # On older tmux versions (<1.9), return False. The reason is that there
+    # is no easy way to get the height of the active pane in the current
+    # window without nested formats (available from version 1.9).
+ vernum = check_terminal_version("tmux")
+ if vernum and LooseVersion(vernum) < '1.9':
+ return False
+ try:
+ p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
+ shell=True,stdout=sub.PIPE,stderr=sub.PIPE)
+ out, err = p.communicate()
+ size = int(out.strip())
+ except OSError as exc:
+ import errno
+ if exc.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+
+ return size/2 >= 19
+
+def check_terminal_version(terminalName):
+ import subprocess as sub
+ try:
+ cmdversion = '%s --version' % terminalName
+ if terminalName.startswith('tmux'):
+ cmdversion = '%s -V' % terminalName
+ newenv = os.environ.copy()
+ newenv["LANG"] = "C"
+ p = sub.Popen(['sh', '-c', cmdversion], stdout=sub.PIPE, stderr=sub.PIPE, env=newenv)
+ out, err = p.communicate()
+ ver_info = out.decode().rstrip().split('\n')
+ except OSError as exc:
+ import errno
+ if exc.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+ vernum = None
+ for ver in ver_info:
+ if ver.startswith('Konsole'):
+ vernum = ver.split(' ')[-1]
+ if ver.startswith('GNOME Terminal'):
+ vernum = ver.split(' ')[-1]
+ if ver.startswith('MATE Terminal'):
+ vernum = ver.split(' ')[-1]
+ if ver.startswith('tmux'):
+ vernum = ver.split()[-1]
+ return vernum
+
+def distro_name():
+    try:
+        p = Popen(['lsb_release', '-i'])
+        out, err = p.communicate()
+        distro = out.decode().split(':')[1].strip().lower()
+    except Exception:
+        distro = "unknown"
+    return distro
diff --git a/poky/meta/lib/oe/types.py b/poky/meta/lib/oe/types.py
new file mode 100644
index 000000000..4ae58acfa
--- /dev/null
+++ b/poky/meta/lib/oe/types.py
@@ -0,0 +1,153 @@
+import errno
+import re
+import os
+
+
+class OEList(list):
+ """OpenEmbedded 'list' type
+
+ Acts as an ordinary list, but is constructed from a string value and a
+ separator (optional), and re-joins itself when converted to a string with
+ str(). Set the variable type flag to 'list' to use this type, and the
+ 'separator' flag may be specified (defaulting to whitespace)."""
+
+ name = "list"
+
+ def __init__(self, value, separator = None):
+ if value is not None:
+ list.__init__(self, value.split(separator))
+ else:
+ list.__init__(self)
+
+ if separator is None:
+ self.separator = " "
+ else:
+ self.separator = separator
+
+ def __str__(self):
+ return self.separator.join(self)
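+
+    # Illustrative sketch (assuming direct construction; normally instances
+    # are created via the variable type flags described above):
+    #   str(OEList("a b c"))      -> "a b c"
+    #   str(OEList("a,b,c", ",")) -> "a,b,c"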
+
+def choice(value, choices):
+ """OpenEmbedded 'choice' type
+
+ Acts as a multiple choice for the user. To use this, set the variable
+ type flag to 'choice', and set the 'choices' flag to a space separated
+ list of valid values."""
+ if not isinstance(value, str):
+ raise TypeError("choice accepts a string, not '%s'" % type(value))
+
+ value = value.lower()
+ choices = choices.lower()
+ if value not in choices.split():
+ raise ValueError("Invalid choice '%s'. Valid choices: %s" %
+ (value, choices))
+ return value
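+
+# Illustrative sketch: matching is case-insensitive, so
+#   choice("Glibc", "glibc musl") -> "glibc"
+#   choice("uclibc", "glibc musl") -> ValueError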
+
+class NoMatch(object):
+ """Stub python regex pattern object which never matches anything"""
+ def findall(self, string, flags=0):
+ return None
+
+ def finditer(self, string, flags=0):
+ return None
+
+    def match(self, string, flags=0):
+        return None
+
+    def search(self, string, flags=0):
+        return None
+
+    def split(self, string, maxsplit=0):
+        return None
+
+    def sub(self, repl, string, count=0):
+        return None
+
+    def subn(self, repl, string, count=0):
+        return None
+
+NoMatch = NoMatch()
+
+def regex(value, regexflags=None):
+ """OpenEmbedded 'regex' type
+
+ Acts as a regular expression, returning the pre-compiled regular
+ expression pattern object. To use this type, set the variable type flag
+    to 'regex', and optionally, set the 'regexflags' flag to a space separated
+ list of the flags to control the regular expression matching (e.g.
+ FOO[regexflags] += 'ignorecase'). See the python documentation on the
+ 're' module for a list of valid flags."""
+
+ flagval = 0
+ if regexflags:
+ for flag in regexflags.split():
+ flag = flag.upper()
+ try:
+ flagval |= getattr(re, flag)
+ except AttributeError:
+ raise ValueError("Invalid regex flag '%s'" % flag)
+
+ if not value:
+ # Let's ensure that the default behavior for an undefined or empty
+ # variable is to match nothing. If the user explicitly wants to match
+ # anything, they can match '.*' instead.
+ return NoMatch
+
+ try:
+ return re.compile(value, flagval)
+ except re.error as exc:
+ raise ValueError("Invalid regex value '%s': %s" %
+ (value, exc.args[0]))
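+
+# Illustrative sketch:
+#   regex("^foo", "ignorecase").match("FOObar") -> a match object
+#   regex("")                                   -> NoMatch (matches nothing)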
+
+def boolean(value):
+ """OpenEmbedded 'boolean' type
+
+ Valid values for true: 'yes', 'y', 'true', 't', '1'
+ Valid values for false: 'no', 'n', 'false', 'f', '0'
+ """
+
+ if not isinstance(value, str):
+ raise TypeError("boolean accepts a string, not '%s'" % type(value))
+
+ value = value.lower()
+ if value in ('yes', 'y', 'true', 't', '1'):
+ return True
+ elif value in ('no', 'n', 'false', 'f', '0'):
+ return False
+ raise ValueError("Invalid boolean value '%s'" % value)
+
+def integer(value, numberbase=10):
+ """OpenEmbedded 'integer' type
+
+ Defaults to base 10, but this can be specified using the optional
+ 'numberbase' flag."""
+
+ return int(value, int(numberbase))
+
+_float = float
+def float(value, fromhex='false'):
+ """OpenEmbedded floating point type
+
+ To use this type, set the type flag to 'float', and optionally set the
+ 'fromhex' flag to a true value (obeying the same rules as for the
+ 'boolean' type) if the value is in base 16 rather than base 10."""
+
+ if boolean(fromhex):
+ return _float.fromhex(value)
+ else:
+ return _float(value)
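+
+# Illustrative sketch: float("3.5") -> 3.5, while
+# float("0x1p-2", fromhex="true") -> 0.25.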
+
+def path(value, relativeto='', normalize='true', mustexist='false'):
+ value = os.path.join(relativeto, value)
+
+ if boolean(normalize):
+ value = os.path.normpath(value)
+
+    if boolean(mustexist):
+        try:
+            with open(value, 'r'):
+                pass
+        except IOError as exc:
+            if exc.errno == errno.ENOENT:
+                raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
+
+ return value
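+
+# Illustrative sketch: path("../fstab", relativeto="/etc/default") joins and
+# normalizes to "/etc/fstab"; with mustexist="true", a missing file raises
+# ValueError.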
diff --git a/poky/meta/lib/oe/useradd.py b/poky/meta/lib/oe/useradd.py
new file mode 100644
index 000000000..179ac76b5
--- /dev/null
+++ b/poky/meta/lib/oe/useradd.py
@@ -0,0 +1,68 @@
+import argparse
+import re
+
+class myArgumentParser(argparse.ArgumentParser):
+ def _print_message(self, message, file=None):
+ bb.warn("%s - %s: %s" % (d.getVar('PN'), pkg, message))
+
+ # This should never be called...
+ def exit(self, status=0, message=None):
+ message = message or ("%s - %s: useradd.bbclass: Argument parsing exited" % (d.getVar('PN'), pkg))
+        self.error(message)
+
+ def error(self, message):
+ raise bb.build.FuncFailed(message)
+
+def split_commands(params):
+ params = re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
+
+def split_args(params):
+ params = re.split('''[ \t]+(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params.strip())
+ # Remove any empty items
+ return [x for x in params if x]
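+
+# Illustrative sketch: separators inside quotes are preserved, e.g.
+#   split_commands("useradd -u 1000 alice; groupadd devs")
+#     -> ['useradd -u 1000 alice', 'groupadd devs']
+#   split_args("useradd -c 'Alice; admin' alice")
+#     -> ['useradd', '-c', "'Alice; admin'", 'alice']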
+
+def build_useradd_parser():
+ # The following comes from --help on useradd from shadow
+ parser = myArgumentParser(prog='useradd')
+ parser.add_argument("-b", "--base-dir", metavar="BASE_DIR", help="base directory for the home directory of the new account")
+ parser.add_argument("-c", "--comment", metavar="COMMENT", help="GECOS field of the new account")
+ parser.add_argument("-d", "--home-dir", metavar="HOME_DIR", help="home directory of the new account")
+ parser.add_argument("-D", "--defaults", help="print or change default useradd configuration", action="store_true")
+ parser.add_argument("-e", "--expiredate", metavar="EXPIRE_DATE", help="expiration date of the new account")
+ parser.add_argument("-f", "--inactive", metavar="INACTIVE", help="password inactivity period of the new account")
+ parser.add_argument("-g", "--gid", metavar="GROUP", help="name or ID of the primary group of the new account")
+ parser.add_argument("-G", "--groups", metavar="GROUPS", help="list of supplementary groups of the new account")
+ parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
+ parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
+ parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
+ parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
+ parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new account")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
+ parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
+ parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
+ parser.add_argument("LOGIN", help="Login name of the new user")
+
+ return parser
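+
+# Illustrative sketch of driving the parser (names and values hypothetical):
+#   uaargs = build_useradd_parser().parse_args(
+#       split_args("-u 1200 -d /home/alice -r alice"))
+#   # uaargs.uid == '1200', uaargs.system == True, uaargs.LOGIN == 'alice'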
+
+def build_groupadd_parser():
+ # The following comes from --help on groupadd from shadow
+ parser = myArgumentParser(prog='groupadd')
+ parser.add_argument("-f", "--force", help="exit successfully if the group already exists, and cancel -g if the GID is already used", action="store_true")
+ parser.add_argument("-g", "--gid", metavar="GID", help="use GID for the new group")
+ parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
+ parser.add_argument("-o", "--non-unique", help="allow to create groups with duplicate (non-unique) GID", action="store_true")
+ parser.add_argument("-p", "--password", metavar="PASSWORD", help="use this encrypted password for the new group")
+ parser.add_argument("-P", "--clear-password", metavar="CLEAR_PASSWORD", help="use this clear password for the new group")
+ parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
+ parser.add_argument("-r", "--system", help="create a system account", action="store_true")
+ parser.add_argument("GROUP", help="Group name of the new group")
+
+ return parser
diff --git a/poky/meta/lib/oe/utils.py b/poky/meta/lib/oe/utils.py
new file mode 100644
index 000000000..80f0442d0
--- /dev/null
+++ b/poky/meta/lib/oe/utils.py
@@ -0,0 +1,421 @@
+import subprocess
+
+def read_file(filename):
+    try:
+        with open(filename, "r") as f:
+            return f.read().strip()
+    except IOError:
+        return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
+
+def ifelse(condition, iftrue = True, iffalse = False):
+ if condition:
+ return iftrue
+ else:
+ return iffalse
+
+def conditional(variable, checkvalue, truevalue, falsevalue, d):
+ if d.getVar(variable) == checkvalue:
+ return truevalue
+ else:
+ return falsevalue
+
+def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ if float(d.getVar(variable)) <= float(checkvalue):
+ return truevalue
+ else:
+ return falsevalue
+
+def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
+ if result <= 0:
+ return truevalue
+ else:
+ return falsevalue
+
+def both_contain(variable1, variable2, checkvalue, d):
+ val1 = d.getVar(variable1)
+ val2 = d.getVar(variable2)
+ val1 = set(val1.split())
+ val2 = set(val2.split())
+ if isinstance(checkvalue, str):
+ checkvalue = set(checkvalue.split())
+ else:
+ checkvalue = set(checkvalue)
+ if checkvalue.issubset(val1) and checkvalue.issubset(val2):
+ return " ".join(checkvalue)
+ else:
+ return ""
+
+def set_intersect(variable1, variable2, d):
+ """
+ Expand both variables, interpret them as lists of strings, and return the
+ intersection as a flattened string.
+
+ For example:
+ s1 = "a b c"
+ s2 = "b c d"
+ s3 = set_intersect(s1, s2)
+ => s3 = "b c"
+ """
+ val1 = set(d.getVar(variable1).split())
+ val2 = set(d.getVar(variable2).split())
+ return " ".join(val1 & val2)
+
+def prune_suffix(var, suffixes, d):
+ # See if var ends with any of the suffixes listed and
+ # remove it if found
+ for suffix in suffixes:
+ if var.endswith(suffix):
+ var = var.replace(suffix, "")
+
+ prefix = d.getVar("MLPREFIX")
+ if prefix and var.startswith(prefix):
+ var = var.replace(prefix, "")
+
+ return var
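+
+# Illustrative sketch: with MLPREFIX = "lib32-",
+# prune_suffix("lib32-gcc-cross", ["-cross"], d) -> "gcc".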
+
+def str_filter(f, str, d):
+ from re import match
+ return " ".join([x for x in str.split() if match(f, x, 0)])
+
+def str_filter_out(f, str, d):
+ from re import match
+ return " ".join([x for x in str.split() if not match(f, x, 0)])
+
+def build_depends_string(depends, task):
+ """Append a taskname to a string of dependencies as used by the [depends] flag"""
+ return " ".join(dep + ":" + task for dep in depends.split())
+
+def inherits(d, *classes):
+ """Return True if the metadata inherits any of the specified classes"""
+ return any(bb.data.inherits_class(cls, d) for cls in classes)
+
+def features_backfill(var, d):
+    # This construct allows the addition of new features to the variable
+    # specified as var.
+    # Example for var = "DISTRO_FEATURES":
+    # This construct allows the addition of new features to DISTRO_FEATURES
+    # that, if not present, would disable existing functionality, without
+    # disturbing distributions that have already set DISTRO_FEATURES.
+    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL
+    # should add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED.
+ features = (d.getVar(var) or "").split()
+ backfill = (d.getVar(var+"_BACKFILL") or "").split()
+ considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
+
+ addfeatures = []
+ for feature in backfill:
+ if feature not in features and feature not in considered:
+ addfeatures.append(feature)
+
+ if addfeatures:
+ d.appendVar(var, " " + " ".join(addfeatures))
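+
+# Illustrative sketch: with DISTRO_FEATURES = "x11",
+# DISTRO_FEATURES_BACKFILL = "pulseaudio" and
+# DISTRO_FEATURES_BACKFILL_CONSIDERED unset, this appends " pulseaudio" to
+# DISTRO_FEATURES.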
+
+def all_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if *all* given features are set in DISTRO_FEATURES,
+ else falsevalue. The features can be given as single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if oe.utils.all_distro_features(d, "foo bar"):
+ bb.fatal("foo and bar are mutually exclusive DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+       require ${@ oe.utils.all_distro_features(d, "foo bar", "foo-and-bar.inc")}
+ """
+ return bb.utils.contains("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def any_distro_features(d, features, truevalue="1", falsevalue=""):
+ """
+ Returns truevalue if at least *one* of the given features is set in DISTRO_FEATURES,
+    else falsevalue. The features can be given as a single string or anything
+ that can be turned into a set.
+
+ This is a shorter, more flexible version of
+ bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d).
+
+ Without explicit true/false values it can be used directly where
+ Python expects a boolean:
+ if not oe.utils.any_distro_features(d, "foo bar"):
+ bb.fatal("foo, bar or both must be set in DISTRO_FEATURES")
+
+ With just a truevalue, it can be used to include files that are meant to be
+ used only when requested via DISTRO_FEATURES:
+       require ${@ oe.utils.any_distro_features(d, "foo bar", "foo-or-bar.inc")}
+
+ """
+ return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
+
+def parallel_make(d):
+ """
+ Return the integer value for the number of parallel threads to use when
+ building, scraped out of PARALLEL_MAKE. If no parallelization option is
+ found, returns None
+
+ e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+ """
+ pm = (d.getVar('PARALLEL_MAKE') or '').split()
+ # look for '-j' and throw other options (e.g. '-l') away
+ while pm:
+ opt = pm.pop(0)
+ if opt == '-j':
+ v = pm.pop(0)
+ elif opt.startswith('-j'):
+ v = opt[2:].strip()
+ else:
+ continue
+
+ return int(v)
+
+ return None
+
+def parallel_make_argument(d, fmt, limit=None):
+ """
+ Helper utility to construct a parallel make argument from the number of
+ parallel threads specified in PARALLEL_MAKE.
+
+ Returns the input format string `fmt` where a single '%d' will be expanded
+ with the number of parallel threads to use. If `limit` is specified, the
+ number of parallel threads will be no larger than it. If no parallelization
+ option is found in PARALLEL_MAKE, returns an empty string
+
+ e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+ "-n 10"
+ """
+ v = parallel_make(d)
+ if v:
+ if limit:
+ v = min(limit, v)
+ return fmt % v
+ return ''
+
+def packages_filter_out_system(d):
+ """
+ Return a list of packages from PACKAGES with the "system" packages such as
+    PN-dbg PN-doc PN-locale-en-gb removed.
+ """
+ pn = d.getVar('PN')
+ blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
+ localepkg = pn + "-locale-"
+ pkgs = []
+
+ for pkg in d.getVar('PACKAGES').split():
+ if pkg not in blacklist and localepkg not in pkg:
+ pkgs.append(pkg)
+ return pkgs
+
+def getstatusoutput(cmd):
+ return subprocess.getstatusoutput(cmd)
+
+
+def trim_version(version, num_parts=2):
+ """
+ Return just the first <num_parts> of <version>, split by periods. For
+ example, trim_version("1.2.3", 2) will return "1.2".
+ """
+ if type(version) is not str:
+ raise TypeError("Version should be a string")
+ if num_parts < 1:
+ raise ValueError("Cannot split to parts < 1")
+
+ parts = version.split(".")
+ trimmed = ".".join(parts[:num_parts])
+ return trimmed
+
+def cpu_count():
+ import multiprocessing
+ return multiprocessing.cpu_count()
+
+def execute_pre_post_process(d, cmds):
+ if cmds is None:
+ return
+
+ for cmd in cmds.strip().split(';'):
+ cmd = cmd.strip()
+ if cmd != '':
+ bb.note("Executing %s ..." % cmd)
+ bb.build.exec_func(cmd, d)
+
+def multiprocess_exec(commands, function):
+ import signal
+ import multiprocessing
+
+ if not commands:
+ return []
+
+ def init_worker():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ fails = []
+
+ def failures(res):
+ fails.append(res)
+
+ nproc = min(multiprocessing.cpu_count(), len(commands))
+ pool = bb.utils.multiprocessingpool(nproc, init_worker)
+
+ try:
+ mapresult = pool.map_async(function, commands, error_callback=failures)
+
+ pool.close()
+ pool.join()
+ results = mapresult.get()
+ except KeyboardInterrupt:
+ pool.terminate()
+ pool.join()
+ raise
+
+ if fails:
+ raise fails[0]
+
+ return results
+
+def squashspaces(string):
+ import re
+ return re.sub("\s+", " ", string).strip()
+
+def format_pkg_list(pkg_dict, ret_format=None):
+ output = []
+
+ if ret_format == "arch":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s" % (pkg, pkg_dict[pkg]["arch"]))
+ elif ret_format == "file":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s %s" % (pkg, pkg_dict[pkg]["filename"], pkg_dict[pkg]["arch"]))
+ elif ret_format == "ver":
+ for pkg in sorted(pkg_dict):
+ output.append("%s %s %s" % (pkg, pkg_dict[pkg]["arch"], pkg_dict[pkg]["ver"]))
+ elif ret_format == "deps":
+ for pkg in sorted(pkg_dict):
+ for dep in pkg_dict[pkg]["deps"]:
+ output.append("%s|%s" % (pkg, dep))
+ else:
+ for pkg in sorted(pkg_dict):
+ output.append(pkg)
+
+ return '\n'.join(output)
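+
+# For example, with pkg_dict = {"bash": {"arch": "core2-64"}} and ret_format="arch",
+# format_pkg_list returns the line "bash core2-64"; with ret_format=None it returns
+# just the sorted package names, one per line.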
+
+def host_gcc_version(d):
+ import re, subprocess
+
+ compiler = d.getVar("BUILD_CC")
+ try:
+ env = os.environ.copy()
+ env["PATH"] = d.getVar("PATH")
+ output = subprocess.check_output("%s --version" % compiler, shell=True, env=env).decode("utf-8")
+ except subprocess.CalledProcessError as e:
+ bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
+
+ match = re.match(".* (\d\.\d)\.\d.*", output.split('\n')[0])
+ if not match:
+ bb.fatal("Can't get compiler version from %s --version output" % compiler)
+
+ version = match.group(1)
+ return "-%s" % version if version in ("4.8", "4.9") else ""
+
+
+def get_multilib_datastore(variant, d):
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+ localdata.setVar("OVERRIDES", overrides)
+ localdata.setVar("MLPREFIX", variant + "-")
+ return localdata
+
+#
+# Python 2.7 doesn't have threaded pools (just multiprocessing)
+# so implement a version here
+#
+
+from queue import Queue
+from threading import Thread
+
+class ThreadedWorker(Thread):
+ """Thread executing tasks from a given tasks queue"""
+ def __init__(self, tasks, worker_init, worker_end):
+ Thread.__init__(self)
+ self.tasks = tasks
+ self.daemon = True
+
+ self.worker_init = worker_init
+ self.worker_end = worker_end
+
+ def run(self):
+ from queue import Empty
+
+ if self.worker_init is not None:
+ self.worker_init(self)
+
+ while True:
+ try:
+ func, args, kargs = self.tasks.get(block=False)
+ except Empty:
+ if self.worker_end is not None:
+ self.worker_end(self)
+ break
+
+ try:
+ func(self, *args, **kargs)
+ except Exception as e:
+ print(e)
+ finally:
+ self.tasks.task_done()
+
+class ThreadedPool:
+ """Pool of threads consuming tasks from a queue"""
+ def __init__(self, num_workers, num_tasks, worker_init=None,
+ worker_end=None):
+ self.tasks = Queue(num_tasks)
+ self.workers = []
+
+ for _ in range(num_workers):
+ worker = ThreadedWorker(self.tasks, worker_init, worker_end)
+ self.workers.append(worker)
+
+ def start(self):
+ for worker in self.workers:
+ worker.start()
+
+ def add_task(self, func, *args, **kargs):
+ """Add a task to the queue"""
+ self.tasks.put((func, args, kargs))
+
+ def wait_completion(self):
+ """Wait for completion of all the tasks in the queue"""
+ self.tasks.join()
+ for worker in self.workers:
+ worker.join()
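+
+# Typical usage of ThreadedPool (illustrative):
+#   pool = ThreadedPool(4, len(tasks))
+#   for t in tasks:
+#       pool.add_task(my_func, t)   # my_func receives the worker as its first arg
+#   pool.start()
+#   pool.wait_completion()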
+
+def write_ld_so_conf(d):
+ # Some utils like prelink may not have the correct target library paths
+ # so write an ld.so.conf to help them
+ ldsoconf = d.expand("${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf")
+ if os.path.exists(ldsoconf):
+ bb.utils.remove(ldsoconf)
+ bb.utils.mkdirhier(os.path.dirname(ldsoconf))
+ with open(ldsoconf, "w") as f:
+ f.write(d.getVar("base_libdir") + '\n')
+ f.write(d.getVar("libdir") + '\n')
+
+class ImageQAFailed(bb.build.FuncFailed):
+ def __init__(self, description, name=None, logfile=None):
+ self.description = description
+ self.name = name
+        self.logfile = logfile
+
+ def __str__(self):
+ msg = 'Function failed: %s' % self.name
+ if self.description:
+ msg = msg + ' (%s)' % self.description
+
+ return msg
diff --git a/poky/meta/lib/oeqa/buildperf/__init__.py b/poky/meta/lib/oeqa/buildperf/__init__.py
new file mode 100644
index 000000000..605f429ec
--- /dev/null
+++ b/poky/meta/lib/oeqa/buildperf/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Build performance tests"""
+from .base import (BuildPerfTestCase,
+ BuildPerfTestLoader,
+ BuildPerfTestResult,
+ BuildPerfTestRunner,
+ KernelDropCaches,
+ runCmd2)
+from .test_basic import *
diff --git a/poky/meta/lib/oeqa/buildperf/base.py b/poky/meta/lib/oeqa/buildperf/base.py
new file mode 100644
index 000000000..ac6ee15d0
--- /dev/null
+++ b/poky/meta/lib/oeqa/buildperf/base.py
@@ -0,0 +1,511 @@
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Build performance test base classes and functionality"""
+import json
+import logging
+import os
+import re
+import resource
+import socket
+import shutil
+import time
+import unittest
+import xml.etree.ElementTree as ET
+from collections import OrderedDict
+from datetime import datetime, timedelta
+from functools import partial
+from multiprocessing import Process
+from multiprocessing import SimpleQueue
+from xml.dom import minidom
+
+import oe.path
+from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
+from oeqa.utils.git import GitError, GitRepo
+
+# Get logger for this module
+log = logging.getLogger('build-perf')
+
+# Our own version of runCmd which does not raise AssertionErrors, which would
+# cause errors to be interpreted as failures
+runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)
+
+
+class KernelDropCaches(object):
+ """Container of the functions for dropping kernel caches"""
+ sudo_passwd = None
+
+ @classmethod
+ def check(cls):
+ """Check permssions for dropping kernel caches"""
+ from getpass import getpass
+ from locale import getdefaultlocale
+ cmd = ['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches']
+ ret = runCmd2(cmd, ignore_status=True, data=b'0')
+ if ret.output.startswith('sudo:'):
+ pass_str = getpass(
+ "\nThe script requires sudo access to drop caches between "
+ "builds (echo 3 > /proc/sys/vm/drop_caches).\n"
+ "Please enter your sudo password: ")
+ cls.sudo_passwd = bytes(pass_str, getdefaultlocale()[1])
+
+ @classmethod
+ def drop(cls):
+ """Drop kernel caches"""
+ cmd = ['sudo', '-k']
+ if cls.sudo_passwd:
+ cmd.append('-S')
+ input_data = cls.sudo_passwd + b'\n'
+ else:
+ cmd.append('-n')
+ input_data = b''
+ cmd += ['tee', '/proc/sys/vm/drop_caches']
+ input_data += b'3'
+ runCmd2(cmd, data=input_data)
+
+
+def str_to_fn(string):
+    """Convert string to a sanitized filename"""
+    # Note: re.LOCALE is not valid for str patterns on Python 3.6+
+    return re.sub(r'(\W+)', '-', string)
+
+
+class ResultsJsonEncoder(json.JSONEncoder):
+ """Extended encoder for build perf test results"""
+ unix_epoch = datetime.utcfromtimestamp(0)
+
+ def default(self, obj):
+ """Encoder for our types"""
+ if isinstance(obj, datetime):
+ # NOTE: we assume that all timestamps are in UTC time
+ return (obj - self.unix_epoch).total_seconds()
+ if isinstance(obj, timedelta):
+ return obj.total_seconds()
+ return json.JSONEncoder.default(self, obj)
+
+
+class BuildPerfTestResult(unittest.TextTestResult):
+ """Runner class for executing the individual tests"""
+ # List of test cases to run
+ test_run_queue = []
+
+ def __init__(self, out_dir, *args, **kwargs):
+ super(BuildPerfTestResult, self).__init__(*args, **kwargs)
+
+ self.out_dir = out_dir
+ self.hostname = socket.gethostname()
+ self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
+ self.start_time = self.elapsed_time = None
+ self.successes = []
+
+ def addSuccess(self, test):
+ """Record results from successful tests"""
+ super(BuildPerfTestResult, self).addSuccess(test)
+ self.successes.append(test)
+
+ def addError(self, test, err):
+ """Record results from crashed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addError(test, err)
+
+ def addFailure(self, test, err):
+ """Record results from failed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addFailure(test, err)
+
+ def addExpectedFailure(self, test, err):
+ """Record results from expectedly failed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addExpectedFailure(test, err)
+
+ def startTest(self, test):
+ """Pre-test hook"""
+ test.base_dir = self.out_dir
+ log.info("Executing test %s: %s", test.name, test.shortDescription())
+ self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
+ super(BuildPerfTestResult, self).startTest(test)
+
+ def startTestRun(self):
+ """Pre-run hook"""
+ self.start_time = datetime.utcnow()
+
+ def stopTestRun(self):
+ """Pre-run hook"""
+ self.elapsed_time = datetime.utcnow() - self.start_time
+
+ def all_results(self):
+ compound = [('SUCCESS', t, None) for t in self.successes] + \
+ [('FAILURE', t, m) for t, m in self.failures] + \
+ [('ERROR', t, m) for t, m in self.errors] + \
+ [('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
+ [('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
+ [('SKIPPED', t, m) for t, m in self.skipped]
+ return sorted(compound, key=lambda info: info[1].start_time)
+
+
+ def write_buildstats_json(self):
+ """Write buildstats file"""
+ buildstats = OrderedDict()
+ for _, test, _ in self.all_results():
+ for key, val in test.buildstats.items():
+ buildstats[test.name + '.' + key] = val
+ with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
+ json.dump(buildstats, fobj, cls=ResultsJsonEncoder)
+
+
+ def write_results_json(self):
+ """Write test results into a json-formatted file"""
+ results = OrderedDict([('tester_host', self.hostname),
+ ('start_time', self.start_time),
+ ('elapsed_time', self.elapsed_time),
+ ('tests', OrderedDict())])
+
+ for status, test, reason in self.all_results():
+ test_result = OrderedDict([('name', test.name),
+ ('description', test.shortDescription()),
+ ('status', status),
+ ('start_time', test.start_time),
+ ('elapsed_time', test.elapsed_time),
+ ('measurements', test.measurements)])
+ if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
+ test_result['message'] = str(test.err[1])
+ test_result['err_type'] = test.err[0].__name__
+ test_result['err_output'] = reason
+ elif reason:
+ test_result['message'] = reason
+
+ results['tests'][test.name] = test_result
+
+ with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
+ json.dump(results, fobj, indent=4,
+ cls=ResultsJsonEncoder)
+
+ def write_results_xml(self):
+ """Write test results into a JUnit XML file"""
+ top = ET.Element('testsuites')
+ suite = ET.SubElement(top, 'testsuite')
+ suite.set('name', 'oeqa.buildperf')
+ suite.set('timestamp', self.start_time.isoformat())
+ suite.set('time', str(self.elapsed_time.total_seconds()))
+ suite.set('hostname', self.hostname)
+ suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
+ suite.set('errors', str(len(self.errors)))
+ suite.set('skipped', str(len(self.skipped)))
+
+ test_cnt = 0
+ for status, test, reason in self.all_results():
+ test_cnt += 1
+ testcase = ET.SubElement(suite, 'testcase')
+ testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
+ testcase.set('name', test.name)
+ testcase.set('description', test.shortDescription())
+ testcase.set('timestamp', test.start_time.isoformat())
+ testcase.set('time', str(test.elapsed_time.total_seconds()))
+            if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
+                if status in ('FAILURE', 'EXPECTED_FAILURE'):
+ result = ET.SubElement(testcase, 'failure')
+ else:
+ result = ET.SubElement(testcase, 'error')
+ result.set('message', str(test.err[1]))
+ result.set('type', test.err[0].__name__)
+ result.text = reason
+ elif status == 'SKIPPED':
+ result = ET.SubElement(testcase, 'skipped')
+ result.text = reason
+ elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
+ raise TypeError("BUG: invalid test status '%s'" % status)
+
+ for data in test.measurements.values():
+ measurement = ET.SubElement(testcase, data['type'])
+ measurement.set('name', data['name'])
+ measurement.set('legend', data['legend'])
+ vals = data['values']
+ if data['type'] == BuildPerfTestCase.SYSRES:
+ ET.SubElement(measurement, 'time',
+ timestamp=vals['start_time'].isoformat()).text = \
+ str(vals['elapsed_time'].total_seconds())
+ attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
+ ET.SubElement(measurement, 'iostat', attrib=attrib)
+ attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
+ ET.SubElement(measurement, 'rusage', attrib=attrib)
+ elif data['type'] == BuildPerfTestCase.DISKUSAGE:
+ ET.SubElement(measurement, 'size').text = str(vals['size'])
+ else:
+ raise TypeError('BUG: unsupported measurement type')
+
+ suite.set('tests', str(test_cnt))
+
+ # Use minidom for pretty-printing
+ dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
+ with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
+ dom_doc.writexml(fobj, addindent=' ', newl='\n', encoding='utf-8')
+
+
+class BuildPerfTestCase(unittest.TestCase):
+ """Base class for build performance tests"""
+ SYSRES = 'sysres'
+ DISKUSAGE = 'diskusage'
+ build_target = None
+
+ def __init__(self, *args, **kwargs):
+ super(BuildPerfTestCase, self).__init__(*args, **kwargs)
+ self.name = self._testMethodName
+ self.base_dir = None
+ self.start_time = None
+ self.elapsed_time = None
+ self.measurements = OrderedDict()
+ self.buildstats = OrderedDict()
+ # self.err is supposed to be a tuple from sys.exc_info()
+ self.err = None
+ self.bb_vars = get_bb_vars()
+ # TODO: remove 'times' and 'sizes' arrays when globalres support is
+ # removed
+ self.times = []
+ self.sizes = []
+
+ @property
+ def tmp_dir(self):
+ return os.path.join(self.base_dir, self.name + '.tmp')
+
+ def shortDescription(self):
+ return super(BuildPerfTestCase, self).shortDescription() or ""
+
+ def setUp(self):
+ """Set-up fixture for each test"""
+ if not os.path.isdir(self.tmp_dir):
+ os.mkdir(self.tmp_dir)
+ if self.build_target:
+ self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])
+
+ def tearDown(self):
+ """Tear-down fixture for each test"""
+ if os.path.isdir(self.tmp_dir):
+ shutil.rmtree(self.tmp_dir)
+
+ def run(self, *args, **kwargs):
+ """Run test"""
+ self.start_time = datetime.now()
+ super(BuildPerfTestCase, self).run(*args, **kwargs)
+ self.elapsed_time = datetime.now() - self.start_time
+
+ def run_cmd(self, cmd):
+ """Convenience method for running a command"""
+ cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
+ log.info("Logging command: %s", cmd_str)
+ try:
+ runCmd2(cmd)
+ except CommandError as err:
+ log.error("Command failed: %s", err.retcode)
+ raise
+
+ def _append_measurement(self, measurement):
+ """Simple helper for adding measurements results"""
+ if measurement['name'] in self.measurements:
+ raise ValueError('BUG: two measurements with the same name in {}'.format(
+ self.__class__.__name__))
+ self.measurements[measurement['name']] = measurement
+
+ def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
+ """Measure system resource usage of a command"""
+ def _worker(data_q, cmd, **kwargs):
+ """Worker process for measuring resources"""
+ try:
+ start_time = datetime.now()
+ ret = runCmd2(cmd, **kwargs)
+ etime = datetime.now() - start_time
+ rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
+ iostat = OrderedDict()
+ with open('/proc/{}/io'.format(os.getpid())) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':')
+ iostat[key] = int(val)
+ rusage = OrderedDict()
+ # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
+ # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
+ for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
+ 'ru_majflt', 'ru_inblock', 'ru_oublock',
+ 'ru_nvcsw', 'ru_nivcsw']:
+ rusage[key] = getattr(rusage_struct, key)
+ data_q.put({'ret': ret,
+ 'start_time': start_time,
+ 'elapsed_time': etime,
+ 'rusage': rusage,
+ 'iostat': iostat})
+ except Exception as err:
+ data_q.put(err)
+
+ cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
+ log.info("Timing command: %s", cmd_str)
+ data_q = SimpleQueue()
+ try:
+ proc = Process(target=_worker, args=(data_q, cmd,))
+ proc.start()
+ data = data_q.get()
+ proc.join()
+ if isinstance(data, Exception):
+ raise data
+ except CommandError:
+ log.error("Command '%s' failed", cmd_str)
+ raise
+ etime = data['elapsed_time']
+
+ measurement = OrderedDict([('type', self.SYSRES),
+ ('name', name),
+ ('legend', legend)])
+ measurement['values'] = OrderedDict([('start_time', data['start_time']),
+ ('elapsed_time', etime),
+ ('rusage', data['rusage']),
+ ('iostat', data['iostat'])])
+ if save_bs:
+ self.save_buildstats(name)
+
+ self._append_measurement(measurement)
+
+ # Append to 'times' array for globalres log
+ e_sec = etime.total_seconds()
+ self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
+ int((e_sec % 3600) / 60),
+ e_sec % 60))
+
+ def measure_disk_usage(self, path, name, legend, apparent_size=False):
+ """Estimate disk usage of a file or directory"""
+ cmd = ['du', '-s', '--block-size', '1024']
+ if apparent_size:
+ cmd.append('--apparent-size')
+ cmd.append(path)
+
+ ret = runCmd2(cmd)
+ size = int(ret.output.split()[0])
+ log.debug("Size of %s path is %s", path, size)
+ measurement = OrderedDict([('type', self.DISKUSAGE),
+ ('name', name),
+ ('legend', legend)])
+ measurement['values'] = OrderedDict([('size', size)])
+ self._append_measurement(measurement)
+ # Append to 'sizes' array for globalres log
+ self.sizes.append(str(size))
+
+ def save_buildstats(self, measurement_name):
+ """Save buildstats"""
+ def split_nevr(nevr):
+ """Split name and version information from recipe "nevr" string"""
+ n_e_v, revision = nevr.rsplit('-', 1)
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
+ n_e_v)
+ if not match:
+ # If we're not able to parse a version starting with a number, just
+ # take the part after last dash
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
+ n_e_v)
+ name = match.group('name')
+ version = match.group('version')
+ epoch = match.group('epoch')
+ return name, epoch, version, revision
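+        # For example, split_nevr("gcc-7.3.0-r0") returns
+        # ("gcc", None, "7.3.0", "r0").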
+
+ def bs_to_json(filename):
+ """Convert (task) buildstats file into json format"""
+ bs_json = OrderedDict()
+ iostat = OrderedDict()
+ rusage = OrderedDict()
+ with open(filename) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Started':
+ start_time = datetime.utcfromtimestamp(float(val))
+ bs_json['start_time'] = start_time
+ elif key == 'Ended':
+ end_time = datetime.utcfromtimestamp(float(val))
+ elif key.startswith('IO '):
+ split = key.split()
+ iostat[split[1]] = int(val)
+ elif key.find('rusage') >= 0:
+ split = key.split()
+ ru_key = split[-1]
+ if ru_key in ('ru_stime', 'ru_utime'):
+ val = float(val)
+ else:
+ val = int(val)
+ rusage[ru_key] = rusage.get(ru_key, 0) + val
+ elif key == 'Status':
+ bs_json['status'] = val
+ bs_json['elapsed_time'] = end_time - start_time
+ bs_json['rusage'] = rusage
+ bs_json['iostat'] = iostat
+ return bs_json
+
+ log.info('Saving buildstats in JSON format')
+ bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
+ if len(bs_dirs) > 1:
+ log.warning("Multiple buildstats found for test %s, only "
+ "archiving the last one", self.name)
+ bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])
+
+ buildstats = []
+ for fname in os.listdir(bs_dir):
+ recipe_dir = os.path.join(bs_dir, fname)
+ if not os.path.isdir(recipe_dir):
+ continue
+ name, epoch, version, revision = split_nevr(fname)
+ recipe_bs = OrderedDict((('name', name),
+ ('epoch', epoch),
+ ('version', version),
+ ('revision', revision),
+ ('tasks', OrderedDict())))
+ for task in os.listdir(recipe_dir):
+ recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
+ task))
+ buildstats.append(recipe_bs)
+
+ self.buildstats[measurement_name] = buildstats
+
+ def rm_tmp(self):
+ """Cleanup temporary/intermediate files and directories"""
+ log.debug("Removing temporary and cache files")
+ for name in ['bitbake.lock', 'conf/sanity_info',
+ self.bb_vars['TMPDIR']]:
+ oe.path.remove(name, recurse=True)
+
+ def rm_sstate(self):
+ """Remove sstate directory"""
+ log.debug("Removing sstate-cache")
+ oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)
+
+ def rm_cache(self):
+ """Drop bitbake caches"""
+ oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)
+
+ @staticmethod
+ def sync():
+ """Sync and drop kernel caches"""
+ runCmd2('bitbake -m', ignore_status=True)
+ log.debug("Syncing and dropping kernel caches""")
+ KernelDropCaches.drop()
+ os.sync()
+ # Wait a bit for all the dirty blocks to be written onto disk
+ time.sleep(3)
+
+
+class BuildPerfTestLoader(unittest.TestLoader):
+ """Test loader for build performance tests"""
+ sortTestMethodsUsing = None
+
+
+class BuildPerfTestRunner(unittest.TextTestRunner):
+ """Test loader for build performance tests"""
+ sortTestMethodsUsing = None
+
+ def __init__(self, out_dir, *args, **kwargs):
+ super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
+ self.out_dir = out_dir
+
+ def _makeResult(self):
+ return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
+ self.verbosity)
diff --git a/poky/meta/lib/oeqa/buildperf/test_basic.py b/poky/meta/lib/oeqa/buildperf/test_basic.py
new file mode 100644
index 000000000..6d6b01b04
--- /dev/null
+++ b/poky/meta/lib/oeqa/buildperf/test_basic.py
@@ -0,0 +1,127 @@
+# Copyright (c) 2016, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+"""Basic set of build performance tests"""
+import os
+import shutil
+
+import oe.path
+from oeqa.buildperf import BuildPerfTestCase
+from oeqa.utils.commands import get_bb_var, get_bb_vars
+
+class Test1P1(BuildPerfTestCase):
+ build_target = 'core-image-sato'
+
+ def test1(self):
+ """Build core-image-sato"""
+ self.rm_tmp()
+ self.rm_sstate()
+ self.rm_cache()
+ self.sync()
+ self.measure_cmd_resources(['bitbake', self.build_target], 'build',
+ 'bitbake ' + self.build_target, save_bs=True)
+ self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
+ self.measure_disk_usage(get_bb_var("IMAGE_ROOTFS", self.build_target), 'rootfs', 'rootfs', True)
+
+
+class Test1P2(BuildPerfTestCase):
+ build_target = 'virtual/kernel'
+
+ def test12(self):
+ """Build virtual/kernel"""
+        # Build and clean sstate in order to get all dependencies pre-built
+ self.run_cmd(['bitbake', self.build_target])
+ self.run_cmd(['bitbake', self.build_target, '-c', 'cleansstate'])
+
+ self.sync()
+ self.measure_cmd_resources(['bitbake', self.build_target], 'build',
+ 'bitbake ' + self.build_target)
+
+
+class Test1P3(BuildPerfTestCase):
+ build_target = 'core-image-sato'
+
+ def test13(self):
+ """Build core-image-sato with rm_work enabled"""
+ postfile = os.path.join(self.tmp_dir, 'postfile.conf')
+ with open(postfile, 'w') as fobj:
+ fobj.write('INHERIT += "rm_work"\n')
+
+ self.rm_tmp()
+ self.rm_sstate()
+ self.rm_cache()
+ self.sync()
+ cmd = ['bitbake', '-R', postfile, self.build_target]
+ self.measure_cmd_resources(cmd, 'build',
+                                   'bitbake ' + self.build_target,
+ save_bs=True)
+ self.measure_disk_usage(self.bb_vars['TMPDIR'], 'tmpdir', 'tmpdir')
+
+
+class Test2(BuildPerfTestCase):
+ build_target = 'core-image-sato'
+
+ def test2(self):
+ """Run core-image-sato do_rootfs with sstate"""
+ # Build once in order to populate sstate cache
+ self.run_cmd(['bitbake', self.build_target])
+
+ self.rm_tmp()
+ self.rm_cache()
+ self.sync()
+ cmd = ['bitbake', self.build_target, '-c', 'rootfs']
+ self.measure_cmd_resources(cmd, 'do_rootfs', 'bitbake do_rootfs')
+
+
+class Test3(BuildPerfTestCase):
+
+ def test3(self):
+ """Bitbake parsing (bitbake -p)"""
+ # Drop all caches and parse
+ self.rm_cache()
+ oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True)
+ self.measure_cmd_resources(['bitbake', '-p'], 'parse_1',
+ 'bitbake -p (no caches)')
+ # Drop tmp/cache
+ oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True)
+ self.measure_cmd_resources(['bitbake', '-p'], 'parse_2',
+ 'bitbake -p (no tmp/cache)')
+ # Parse with fully cached data
+ self.measure_cmd_resources(['bitbake', '-p'], 'parse_3',
+ 'bitbake -p (cached)')
+
+
+class Test4(BuildPerfTestCase):
+ build_target = 'core-image-sato'
+
+ def test4(self):
+ """eSDK metrics"""
+ self.run_cmd(['bitbake', '-c', 'do_populate_sdk_ext',
+ self.build_target])
+ self.bb_vars = get_bb_vars(None, self.build_target)
+ tmp_dir = self.bb_vars['TMPDIR']
+ installer = os.path.join(
+ self.bb_vars['SDK_DEPLOY'],
+ self.bb_vars['TOOLCHAINEXT_OUTPUTNAME'] + '.sh')
+ # Measure installer size
+ self.measure_disk_usage(installer, 'installer_bin', 'eSDK installer',
+ apparent_size=True)
+ # Measure deployment time and deployed size
+ deploy_dir = os.path.join(tmp_dir, 'esdk-deploy')
+ if os.path.exists(deploy_dir):
+ shutil.rmtree(deploy_dir)
+ self.sync()
+ self.measure_cmd_resources([installer, '-y', '-d', deploy_dir],
+ 'deploy', 'eSDK deploy')
+        # Make sure bitbake is unloaded
+ self.sync()
+ self.measure_disk_usage(deploy_dir, 'deploy_dir', 'deploy dir',
+ apparent_size=True)
diff --git a/poky/meta/lib/oeqa/controllers/__init__.py b/poky/meta/lib/oeqa/controllers/__init__.py
new file mode 100644
index 000000000..8eda92763
--- /dev/null
+++ b/poky/meta/lib/oeqa/controllers/__init__.py
@@ -0,0 +1,3 @@
+# Enable other layers to have modules in the same named directory
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/poky/meta/lib/oeqa/controllers/masterimage.py b/poky/meta/lib/oeqa/controllers/masterimage.py
new file mode 100644
index 000000000..a2912fc56
--- /dev/null
+++ b/poky/meta/lib/oeqa/controllers/masterimage.py
@@ -0,0 +1,239 @@
+# Copyright (C) 2014 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# This module adds support to testimage.bbclass to deploy images and run
+# tests using a "master image" - this is a "known good" image that is
+# installed onto the device as part of initial setup and will be booted into
+# with no interaction; we can then use it to deploy the image to be tested
+# to a second partition before running the tests.
+#
+# For an example master image, see core-image-testmaster
+# (meta/recipes-extended/images/core-image-testmaster.bb)
+
+import os
+import bb
+import traceback
+import time
+import subprocess
+
+import oeqa.targetcontrol
+import oeqa.utils.sshcontrol as sshcontrol
+import oeqa.utils.commands as commands
+from oeqa.utils import CommandError
+
+from abc import ABCMeta, abstractmethod
+
+class MasterImageHardwareTarget(oeqa.targetcontrol.BaseTarget, metaclass=ABCMeta):
+
+ supported_image_fstypes = ['tar.gz', 'tar.bz2']
+
+ def __init__(self, d):
+ super(MasterImageHardwareTarget, self).__init__(d)
+
+ # target ip
+ addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
+ self.ip = addr.split(":")[0]
+ try:
+ self.port = addr.split(":")[1]
+ except IndexError:
+ self.port = None
+ bb.note("Target IP: %s" % self.ip)
+ self.server_ip = d.getVar("TEST_SERVER_IP")
+ if not self.server_ip:
+ try:
+                self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip]).decode('utf-8').split("\n")[0].split()[-1]
+ except Exception as e:
+ bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
+ bb.note("Server IP: %s" % self.server_ip)
+
+ # test rootfs + kernel
+ self.image_fstype = self.get_image_fstype(d)
+ self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
+ self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
+ if not os.path.isfile(self.rootfs):
+            # We could've checked that IMAGE_FSTYPES contains tar.gz, but the config for running testimage might not be
+            # the same as the config with which the image was built, i.e.
+            # you bitbake core-image-sato with IMAGE_FSTYPES += "tar.gz"
+            # and your autobuilder overwrites the config, adds the test bits and runs bitbake core-image-sato -c testimage
+            bb.fatal("No rootfs found. Did you build the image?\nIf yes, did you build it with IMAGE_FSTYPES += \"tar.gz\"? \
+            \nExpected path: %s" % self.rootfs)
+ if not os.path.isfile(self.kernel):
+ bb.fatal("No kernel found. Expected path: %s" % self.kernel)
+
+ # master ssh connection
+ self.master = None
+ # if the user knows what they are doing, then by all means...
+ self.user_cmds = d.getVar("TEST_DEPLOY_CMDS")
+ self.deploy_cmds = None
+
+ # this is the name of the command that controls the power for a board
+ # e.g: TEST_POWERCONTROL_CMD = "/home/user/myscripts/powercontrol.py ${MACHINE} what-ever-other-args-the-script-wants"
+        # the command should accept "off", "on" or "cycle" (off, then on) as its last argument
+ self.powercontrol_cmd = d.getVar("TEST_POWERCONTROL_CMD") or None
+ self.powercontrol_args = d.getVar("TEST_POWERCONTROL_EXTRA_ARGS", False) or ""
+
+ self.serialcontrol_cmd = d.getVar("TEST_SERIALCONTROL_CMD") or None
+ self.serialcontrol_args = d.getVar("TEST_SERIALCONTROL_EXTRA_ARGS", False) or ""
+
+ self.origenv = os.environ
+ if self.powercontrol_cmd or self.serialcontrol_cmd:
+ # the external script for controlling power might use ssh
+ # ssh + keys means we need the original user env
+ bborigenv = d.getVar("BB_ORIGENV", False) or {}
+ for key in bborigenv:
+ val = bborigenv.getVar(key)
+ if val is not None:
+ self.origenv[key] = str(val)
+
+ if self.powercontrol_cmd:
+ if self.powercontrol_args:
+ self.powercontrol_cmd = "%s %s" % (self.powercontrol_cmd, self.powercontrol_args)
+ if self.serialcontrol_cmd:
+ if self.serialcontrol_args:
+ self.serialcontrol_cmd = "%s %s" % (self.serialcontrol_cmd, self.serialcontrol_args)
+
+ def power_ctl(self, msg):
+ if self.powercontrol_cmd:
+ cmd = "%s %s" % (self.powercontrol_cmd, msg)
+ try:
+ commands.runCmd(cmd, assert_error=False, preexec_fn=os.setsid, env=self.origenv)
+ except CommandError as e:
+ bb.fatal(str(e))
+
+ def power_cycle(self, conn):
+ if self.powercontrol_cmd:
+ # be nice, don't just cut power
+ conn.run("shutdown -h now")
+ time.sleep(10)
+ self.power_ctl("cycle")
+ else:
+ status, output = conn.run("sync; { sleep 1; reboot; } > /dev/null &")
+ if status != 0:
+ bb.error("Failed rebooting target and no power control command defined. You need to manually reset the device.\n%s" % output)
+
+ def _wait_until_booted(self):
+ ''' Waits until the target device has booted (if we have just power cycled it) '''
+ # Subclasses with better methods of determining boot can override this
+ time.sleep(120)
+
+ def deploy(self):
+ # base class just sets the ssh log file for us
+ super(MasterImageHardwareTarget, self).deploy()
+ self.master = sshcontrol.SSHControl(ip=self.ip, logfile=self.sshlog, timeout=600, port=self.port)
+ status, output = self.master.run("cat /etc/masterimage")
+ if status != 0:
+ # We're not booted into the master image, so try rebooting
+ bb.plain("%s - booting into the master image" % self.pn)
+ self.power_ctl("cycle")
+ self._wait_until_booted()
+
+ bb.plain("%s - deploying image on target" % self.pn)
+ status, output = self.master.run("cat /etc/masterimage")
+ if status != 0:
+ bb.fatal("No ssh connectivity or target isn't running a master image.\n%s" % output)
+ if self.user_cmds:
+ self.deploy_cmds = self.user_cmds.split("\n")
+ try:
+ self._deploy()
+ except Exception as e:
+ bb.fatal("Failed deploying test image: %s" % e)
+
+ @abstractmethod
+ def _deploy(self):
+ pass
+
+ def start(self, extra_bootparams=None):
+ bb.plain("%s - boot test image on target" % self.pn)
+ self._start()
+ # set the ssh object for the target/test image
+ self.connection = sshcontrol.SSHControl(self.ip, logfile=self.sshlog, port=self.port)
+ bb.plain("%s - start running tests" % self.pn)
+
+ @abstractmethod
+ def _start(self):
+ pass
+
+ def stop(self):
+ bb.plain("%s - reboot/powercycle target" % self.pn)
+ self.power_cycle(self.master)
+
+
+class SystemdbootTarget(MasterImageHardwareTarget):
+
+ def __init__(self, d):
+ super(SystemdbootTarget, self).__init__(d)
+        # this is the value we need to set in the LoaderEntryOneShot EFI variable
+        # so the system boots the 'test' bootloader label and not the default
+        # The first four bytes are EFI bits, and the rest is a utf-16le string
+ # (EFI vars values need to be utf-16)
+ # $ echo -en "test\0" | iconv -f ascii -t utf-16le | hexdump -C
+ # 00000000 74 00 65 00 73 00 74 00 00 00 |t.e.s.t...|
+ self.efivarvalue = r'\x07\x00\x00\x00\x74\x00\x65\x00\x73\x00\x74\x00\x00\x00'
+ self.deploy_cmds = [
+ 'mount -L boot /boot',
+ 'mkdir -p /mnt/testrootfs',
+ 'mount -L testrootfs /mnt/testrootfs',
+ 'modprobe efivarfs',
+ 'mount -t efivarfs efivarfs /sys/firmware/efi/efivars',
+ 'cp ~/test-kernel /boot',
+ 'rm -rf /mnt/testrootfs/*',
+ 'tar xvf ~/test-rootfs.%s -C /mnt/testrootfs' % self.image_fstype,
+ 'printf "%s" > /sys/firmware/efi/efivars/LoaderEntryOneShot-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f' % self.efivarvalue
+ ]
+
+ def _deploy(self):
+ # make sure these aren't mounted
+ self.master.run("umount /boot; umount /mnt/testrootfs; umount /sys/firmware/efi/efivars;")
+ # from now on, every deploy cmd should return 0
+ # else an exception will be thrown by sshcontrol
+ self.master.ignore_status = False
+ self.master.copy_to(self.rootfs, "~/test-rootfs." + self.image_fstype)
+ self.master.copy_to(self.kernel, "~/test-kernel")
+ for cmd in self.deploy_cmds:
+ self.master.run(cmd)
+
+ def _start(self, params=None):
+ self.power_cycle(self.master)
+ # there are better ways than a timeout but this should work for now
+ time.sleep(120)
diff --git a/poky/meta/lib/oeqa/controllers/testtargetloader.py b/poky/meta/lib/oeqa/controllers/testtargetloader.py
new file mode 100644
index 000000000..b51d04b21
--- /dev/null
+++ b/poky/meta/lib/oeqa/controllers/testtargetloader.py
@@ -0,0 +1,68 @@
+import types
+import bb
+import os
+
+# This class is responsible for loading a test target controller
+class TestTargetLoader:
+
+ # Search oeqa.controllers module directory for and return a controller
+ # corresponding to the given target name.
+ # AttributeError raised if not found.
+ # ImportError raised if a provided module can not be imported.
+ def get_controller_module(self, target, bbpath):
+ controllerslist = self.get_controller_modulenames(bbpath)
+ bb.note("Available controller modules: %s" % str(controllerslist))
+ controller = self.load_controller_from_name(target, controllerslist)
+ return controller
+
+ # Return a list of all python modules in lib/oeqa/controllers for each
+ # layer in bbpath
+ def get_controller_modulenames(self, bbpath):
+
+ controllerslist = []
+
+ def add_controller_list(path):
+ if not os.path.exists(os.path.join(path, '__init__.py')):
+ bb.fatal('Controllers directory %s exists but is missing __init__.py' % path)
+ files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
+ for f in files:
+ module = 'oeqa.controllers.' + f[:-3]
+ if module not in controllerslist:
+ controllerslist.append(module)
+ else:
+ bb.warn("Duplicate controller module found for %s, only one added. Layers should create unique controller module names" % module)
+
+ for p in bbpath:
+ controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
+ bb.debug(2, 'Searching for target controllers in %s' % controllerpath)
+ if os.path.exists(controllerpath):
+ add_controller_list(controllerpath)
+ return controllerslist
+
+ # Search for and return a controller from given target name and
+ # set of module names.
+ # Raise AttributeError if not found.
+ # Raise ImportError if a provided module can not be imported
+ def load_controller_from_name(self, target, modulenames):
+ for name in modulenames:
+ obj = self.load_controller_from_module(target, name)
+ if obj:
+ return obj
+ raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))
+
+ # Search for and return a controller or None from given module name
+ def load_controller_from_module(self, target, modulename):
+ obj = None
+ # import module, allowing it to raise import exception
+ module = __import__(modulename, globals(), locals(), [target])
+ # look for target class in the module, catching any exceptions as it
+ # is valid that a module may not have the target class.
+ try:
+ obj = getattr(module, target)
+ if obj:
+ from oeqa.targetcontrol import BaseTarget
+                if not issubclass(obj, BaseTarget):
+ bb.warn("Target {0} found, but subclass is not BaseTarget".format(target))
+        except Exception:
+ obj = None
+ return obj
diff --git a/poky/meta/lib/oeqa/core/README b/poky/meta/lib/oeqa/core/README
new file mode 100644
index 000000000..d4fcda41f
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/README
@@ -0,0 +1,76 @@
+= OEQA (v2) Framework =
+
+== Introduction ==
+
+This is version 2 of the OEQA framework. Base classes are located in the
+'oeqa/core' directory and subsequent components must extend from these.
+
+The main design consideration was to implement the needed functionality on
+top of the Python unittest framework. To achieve this goal, the following
+modules are used:
+
+ * oeqa/core/runner.py: Provides OETestResult and OETestRunner base
+ classes extending the unittest class. These classes support exporting
+ results to different formats; currently RAW and XML support exist.
+
+ * oeqa/core/loader.py: Provides OETestLoader extending the unittest class.
+ It also features a unified implementation of decorator support and
+ filtering test cases.
+
+ * oeqa/core/case.py: Provides OETestCase base class extending
+ unittest.TestCase and provides access to the Test data (td), Test context
+ and Logger functionality.
+
+ * oeqa/core/decorator: Provides OETestDecorator, a new class to implement
+ decorators for Test cases.
+
+ * oeqa/core/context: Provides OETestContext, a high-level API for
+   loadTests and runTests of a certain Test component, and
+   OETestContextExecutor, a base class to enable oe-test to discover/use
+   the Test component.
+
+Also, a new 'oe-test' runner is located under 'scripts', allowing scans for components
+that support OETestContextExecutor (see below).
+
+== Terminology ==
+
+ * Test component: The area of testing in the Project, for example: runtime, SDK, eSDK, selftest.
+
+ * Test data: Data associated with the Test component. Currently we use the bitbake
+   datastore as a Test data input.
+
+ * Test context: A context of what tests need to be run and how to do it; this additionally
+ provides access to the Test data and could have custom methods and/or attrs.
+
+== oe-test ==
+
+The new tool, oe-test, has the ability to scan the code base for test components and provide
+a unified way to run test cases. Internally it scans folders inside the oeqa module
+specific classes that implement a test component.
+
+== Usage ==
+
+Executing the example test component
+
+ $ source oe-init-build-env
+ $ oe-test core
+
+Getting help
+
+ $ oe-test -h
+
+== Creating new Test Component ==
+
+To add a new test component, the developer needs to extend OETestContext/OETestContextExecutor
+(from context.py) and OETestCase (from case.py), as sketched below.
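+
+A minimal sketch of a new component (all names below are illustrative, not
+part of the framework):
+
+    # lib/oeqa/mycomp/context.py
+    from oeqa.core.context import OETestContext, OETestContextExecutor
+
+    class MyCompTestContext(OETestContext):
+        pass
+
+    class MyCompTestContextExecutor(OETestContextExecutor):
+        _context_class = MyCompTestContext
+        name = 'mycomp'
+        help = 'mycomp test component'
+        description = 'executes the mycomp test suite'
+
+    _executor_class = MyCompTestContextExecutor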
+
+== Selftesting the framework ==
+
+Run all tests:
+
+ $ PATH=$PATH:../../ python3 -m unittest discover -s tests
+
+Run a specific test:
+
+ $ cd tests/
+ $ ./test_data.py
diff --git a/poky/meta/lib/oeqa/core/__init__.py b/poky/meta/lib/oeqa/core/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/__init__.py
diff --git a/poky/meta/lib/oeqa/core/case.py b/poky/meta/lib/oeqa/core/case.py
new file mode 100644
index 000000000..917a2aa3f
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/case.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import unittest
+
+from oeqa.core.exception import OEQAMissingVariable
+
+def _validate_td_vars(td, td_vars, type_msg):
+ if td_vars:
+ for v in td_vars:
+            if v not in td:
+                raise OEQAMissingVariable("Test %s needs %s variable but"\
+                    " it isn't in td" % (type_msg, v))
+
+class OETestCase(unittest.TestCase):
+ # TestContext and Logger instance set by OETestLoader.
+ tc = None
+ logger = None
+
+ # td has all the variables needed by the test cases
+    # and is the same across all the test cases.
+ td = None
+
+    # td_vars has the variables needed by a test class
+    # or test case instance; if some var isn't in td, an
+    # OEQAMissingVariable exception is raised.
+ td_vars = None
+
+ @classmethod
+ def _oeSetUpClass(clss):
+ _validate_td_vars(clss.td, clss.td_vars, "class")
+ clss.setUpClassMethod()
+
+ @classmethod
+ def _oeTearDownClass(clss):
+ clss.tearDownClassMethod()
+
+ def _oeSetUp(self):
+ for d in self.decorators:
+ d.setUpDecorator()
+ self.setUpMethod()
+
+ def _oeTearDown(self):
+ for d in self.decorators:
+ d.tearDownDecorator()
+ self.tearDownMethod()
diff --git a/poky/meta/lib/oeqa/core/cases/__init__.py b/poky/meta/lib/oeqa/core/cases/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/cases/__init__.py
diff --git a/poky/meta/lib/oeqa/core/cases/example/data.json b/poky/meta/lib/oeqa/core/cases/example/data.json
new file mode 100644
index 000000000..21d6b16d1
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/cases/example/data.json
@@ -0,0 +1 @@
+{"ARCH": "x86", "IMAGE": "core-image-minimal"} \ No newline at end of file
diff --git a/poky/meta/lib/oeqa/core/cases/example/test_basic.py b/poky/meta/lib/oeqa/core/cases/example/test_basic.py
new file mode 100644
index 000000000..11cf3800c
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/cases/example/test_basic.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+class OETestExample(OETestCase):
+ def test_example(self):
+ self.logger.info('IMAGE: %s' % self.td.get('IMAGE'))
+ self.assertEqual('core-image-minimal', self.td.get('IMAGE'))
+ self.logger.info('ARCH: %s' % self.td.get('ARCH'))
+ self.assertEqual('x86', self.td.get('ARCH'))
+
+class OETestExampleDepend(OETestCase):
+ @OETestDepends(['OETestExample.test_example'])
+ def test_example_depends(self):
+ pass
+
+ def test_example_no_depends(self):
+ pass
diff --git a/poky/meta/lib/oeqa/core/context.py b/poky/meta/lib/oeqa/core/context.py
new file mode 100644
index 000000000..acd547416
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/context.py
@@ -0,0 +1,191 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import json
+import time
+import logging
+import collections
+
+from oeqa.core.loader import OETestLoader
+from oeqa.core.runner import OETestRunner
+from oeqa.core.exception import OEQAMissingManifest, OEQATestNotFound
+
+class OETestContext(object):
+ loaderClass = OETestLoader
+ runnerClass = OETestRunner
+
+ files_dir = os.path.abspath(os.path.join(os.path.dirname(
+ os.path.abspath(__file__)), "../files"))
+
+ def __init__(self, td=None, logger=None):
+        if not isinstance(td, dict):
+            raise TypeError("td isn't a dictionary")
+
+ self.td = td
+ self.logger = logger
+ self._registry = {}
+ self._registry['cases'] = collections.OrderedDict()
+ self._results = {}
+
+ def _read_modules_from_manifest(self, manifest):
+ if not os.path.exists(manifest):
+            raise OEQAMissingManifest("Manifest does not exist at %s" % manifest)
+
+        modules = []
+        with open(manifest) as fobj:
+            for line in fobj:
+                line = line.strip()
+                if line and not line.startswith("#"):
+                    modules.append(line)
+
+ return modules
+
+ def skipTests(self, skips):
+ if not skips:
+ return
+ for test in self.suites:
+ for skip in skips:
+ if test.id().startswith(skip):
+ setattr(test, 'setUp', lambda: test.skipTest('Skip by the command line argument "%s"' % skip))
+
+ def loadTests(self, module_paths, modules=[], tests=[],
+ modules_manifest="", modules_required=[], filters={}):
+ if modules_manifest:
+ modules = self._read_modules_from_manifest(modules_manifest)
+
+ self.loader = self.loaderClass(self, module_paths, modules, tests,
+ modules_required, filters)
+ self.suites = self.loader.discover()
+
+ def runTests(self, skips=[]):
+ self.runner = self.runnerClass(self, descriptions=False, verbosity=2)
+
+        # Dynamically skip those tests specified through arguments
+ self.skipTests(skips)
+
+ self._run_start_time = time.time()
+ result = self.runner.run(self.suites)
+ self._run_end_time = time.time()
+
+ return result
+
+ def listTests(self, display_type):
+ self.runner = self.runnerClass(self, verbosity=2)
+ return self.runner.list_tests(self.suites, display_type)
+
+class OETestContextExecutor(object):
+ _context_class = OETestContext
+ _script_executor = 'oe-test'
+
+ name = 'core'
+ help = 'core test component example'
+ description = 'executes core test suite example'
+
+ default_cases = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'cases/example')]
+ default_test_data = os.path.join(default_cases[0], 'data.json')
+ default_tests = None
+
+ def register_commands(self, logger, subparsers):
+ self.parser = subparsers.add_parser(self.name, help=self.help,
+ description=self.description, group='components')
+
+ self.default_output_log = '%s-results-%s.log' % (self.name,
+ time.strftime("%Y%m%d%H%M%S"))
+ self.parser.add_argument('--output-log', action='store',
+ default=self.default_output_log,
+ help="results output log, default: %s" % self.default_output_log)
+
+ group = self.parser.add_mutually_exclusive_group()
+ group.add_argument('--run-tests', action='store', nargs='+',
+ default=self.default_tests,
+ help="tests to run in <module>[.<class>[.<name>]]")
+ group.add_argument('--list-tests', action='store',
+ choices=('module', 'class', 'name'),
+ help="lists available tests")
+
+ if self.default_test_data:
+ self.parser.add_argument('--test-data-file', action='store',
+ default=self.default_test_data,
+ help="data file to load, default: %s" % self.default_test_data)
+ else:
+ self.parser.add_argument('--test-data-file', action='store',
+ help="data file to load")
+
+ if self.default_cases:
+ self.parser.add_argument('CASES_PATHS', action='store',
+ default=self.default_cases, nargs='*',
+ help="paths to directories with test cases, default: %s"\
+ % self.default_cases)
+ else:
+ self.parser.add_argument('CASES_PATHS', action='store',
+ nargs='+', help="paths to directories with test cases")
+
+ self.parser.set_defaults(func=self.run)
+
+ def _setup_logger(self, logger, args):
+ formatter = logging.Formatter('%(asctime)s - ' + self.name + \
+ ' - %(levelname)s - %(message)s')
+ sh = logger.handlers[0]
+ sh.setFormatter(formatter)
+ fh = logging.FileHandler(args.output_log)
+ fh.setFormatter(formatter)
+ logger.addHandler(fh)
+
+ return logger
+
+ def _process_args(self, logger, args):
+ self.tc_kwargs = {}
+ self.tc_kwargs['init'] = {}
+ self.tc_kwargs['load'] = {}
+ self.tc_kwargs['list'] = {}
+ self.tc_kwargs['run'] = {}
+
+ self.tc_kwargs['init']['logger'] = self._setup_logger(logger, args)
+ if args.test_data_file:
+ self.tc_kwargs['init']['td'] = json.load(
+ open(args.test_data_file, "r"))
+ else:
+ self.tc_kwargs['init']['td'] = {}
+
+ if args.run_tests:
+ self.tc_kwargs['load']['modules'] = args.run_tests
+ self.tc_kwargs['load']['modules_required'] = args.run_tests
+ else:
+ self.tc_kwargs['load']['modules'] = []
+
+ self.tc_kwargs['run']['skips'] = []
+
+ self.module_paths = args.CASES_PATHS
+
+ def _pre_run(self):
+ pass
+
+ def run(self, logger, args):
+ self._process_args(logger, args)
+
+ self.tc = self._context_class(**self.tc_kwargs['init'])
+ try:
+ self.tc.loadTests(self.module_paths, **self.tc_kwargs['load'])
+ except OEQATestNotFound as ex:
+ logger.error(ex)
+ sys.exit(1)
+
+ if args.list_tests:
+ rc = self.tc.listTests(args.list_tests, **self.tc_kwargs['list'])
+ else:
+ self._pre_run()
+ rc = self.tc.runTests(**self.tc_kwargs['run'])
+ rc.logDetails()
+ rc.logSummary(self.name)
+
+ output_link = os.path.join(os.path.dirname(args.output_log),
+ "%s-results.log" % self.name)
+ if os.path.exists(output_link):
+ os.remove(output_link)
+ os.symlink(args.output_log, output_link)
+
+ return rc
+
+_executor_class = OETestContextExecutor
diff --git a/poky/meta/lib/oeqa/core/decorator/__init__.py b/poky/meta/lib/oeqa/core/decorator/__init__.py
new file mode 100644
index 000000000..855b6b9d2
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/decorator/__init__.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from functools import wraps
+from abc import abstractmethod
+
+decoratorClasses = set()
+
+def registerDecorator(obj):
+ decoratorClasses.add(obj)
+ return obj
+
+class OETestDecorator(object):
+ case = None # Reference of OETestCase decorated
+ attrs = None # Attributes to be loaded by decorator implementation
+
+ def __init__(self, *args, **kwargs):
+ if not self.attrs:
+ return
+
+ for idx, attr in enumerate(self.attrs):
+ if attr in kwargs:
+ value = kwargs[attr]
+ else:
+ value = args[idx]
+ setattr(self, attr, value)
+
+ def __call__(self, func):
+ @wraps(func)
+ def wrapped_f(*args, **kwargs):
+ self.attrs = self.attrs # XXX: Enables OETestLoader discover
+ return func(*args, **kwargs)
+ return wrapped_f
+
+    # OETestLoader calls this method when it is loading test cases.
+    # XXX: Most methods would change the registry for later
+    # processing; be aware that the filtrate method needs to
+    # run later than bind, so there could be data (in the
+    # registry) from cases that were filtered out.
+ def bind(self, registry, case):
+ self.case = case
+ self.logger = case.tc.logger
+ self.case.decorators.append(self)
+
+    # OETestRunner calls this method when it tries to run
+    # the test case.
+ def setUpDecorator(self):
+ pass
+
+    # OETestRunner calls this method after a test method has
+    # run, even if the method raised an exception.
+ def tearDownDecorator(self):
+ pass
+
+class OETestDiscover(OETestDecorator):
+
+    # OETestLoader calls this method after discovering the test
+    # cases; it needs to return the cases to be run.
+ @staticmethod
+ def discover(registry):
+ return registry['cases']
+
+class OETestFilter(OETestDecorator):
+
+    # OETestLoader calls this method while loading the tests
+    # in the loadTestsFromTestCase method; it needs to
+    # return a bool, True if the case needs to be filtered out.
+    # This method must consume the filter it uses.
+ @abstractmethod
+ def filtrate(self, filters):
+ return False
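+
+# Usage sketch (hypothetical decorator, for illustration only): a custom
+# decorator declares the attributes it accepts in 'attrs', implements the
+# hooks it needs, and registers itself so OETestLoader can discover it:
+#
+#   @registerDecorator
+#   class OETestNote(OETestDecorator):
+#       attrs = ('note',)
+#
+#       def setUpDecorator(self):
+#           self.logger.debug("note for %s: %s" % (self.case.id(), self.note))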
diff --git a/poky/meta/lib/oeqa/core/decorator/data.py b/poky/meta/lib/oeqa/core/decorator/data.py
new file mode 100644
index 000000000..ff7bdd98b
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/decorator/data.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.exception import OEQAMissingVariable
+
+from . import OETestDecorator, registerDecorator
+
+def has_feature(td, feature):
+ """
+ Checks for feature in DISTRO_FEATURES or IMAGE_FEATURES.
+ """
+
+ if (feature in td.get('DISTRO_FEATURES', '') or
+ feature in td.get('IMAGE_FEATURES', '')):
+ return True
+ return False
+
+@registerDecorator
+class skipIfDataVar(OETestDecorator):
+ """
+ Skip test based on value of a data store's variable.
+
+    It gets the value of var from the data store and
+    checks it against value; if they are equal it skips the test
+    with msg as the reason.
+ """
+
+ attrs = ('var', 'value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %r value is %r to skip test' %
+ (self.var, self.value))
+ self.logger.debug(msg)
+ if self.case.td.get(self.var) == self.value:
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfNotDataVar(OETestDecorator):
+ """
+ Skip test based on value of a data store's variable.
+
+    It gets the value of var from the data store and
+    checks it against value; if they are not equal it skips the
+    test with msg as the reason.
+ """
+
+ attrs = ('var', 'value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %r value is not %r to skip test' %
+ (self.var, self.value))
+ self.logger.debug(msg)
+ if not self.case.td.get(self.var) == self.value:
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class skipIfNotInDataVar(OETestDecorator):
+ """
+ Skip test if value is not in data store's variable.
+ """
+
+ attrs = ('var', 'value', 'msg')
+ def setUpDecorator(self):
+        msg = ('Checking if %r is in %r to run '
+                'the test' % (self.value, self.var))
+ self.logger.debug(msg)
+ if not self.value in self.case.td.get(self.var):
+ self.case.skipTest(self.msg)
+
+@registerDecorator
+class OETestDataDepends(OETestDecorator):
+ attrs = ('td_depends',)
+
+ def setUpDecorator(self):
+ for v in self.td_depends:
+ try:
+ value = self.case.td[v]
+ except KeyError:
+                raise OEQAMissingVariable("Test case needs %s variable but"\
+                        " it isn't in td" % v)
+
+@registerDecorator
+class skipIfNotFeature(OETestDecorator):
+ """
+ Skip test based on DISTRO_FEATURES.
+
+ value must be in distro features or it will skip the test
+ with msg as the reason.
+ """
+
+ attrs = ('value', 'msg')
+
+ def setUpDecorator(self):
+ msg = ('Checking if %s is in DISTRO_FEATURES '
+ 'or IMAGE_FEATURES' % (self.value))
+ self.logger.debug(msg)
+ if not has_feature(self.case.td, self.value):
+ self.case.skipTest(self.msg)
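+
+# Usage sketch (test class and values are hypothetical, for illustration):
+#
+#   from oeqa.core.case import OETestCase
+#
+#   class ExampleTest(OETestCase):
+#       @skipIfNotFeature('systemd', 'Test requires systemd in DISTRO_FEATURES')
+#       @skipIfDataVar('MACHINE', 'qemuarm', 'Test not supported on qemuarm')
+#       def test_something(self):
+#           pass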
diff --git a/poky/meta/lib/oeqa/core/decorator/depends.py b/poky/meta/lib/oeqa/core/decorator/depends.py
new file mode 100644
index 000000000..baa04341c
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/decorator/depends.py
@@ -0,0 +1,100 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from unittest import SkipTest
+
+from oeqa.core.threaded import OETestRunnerThreaded
+from oeqa.core.exception import OEQADependency
+
+from . import OETestDiscover, registerDecorator
+
+def _add_depends(registry, case, depends):
+ module_name = case.__module__
+ class_name = case.__class__.__name__
+
+ case_id = case.id()
+
+ for depend in depends:
+ dparts = depend.split('.')
+
+ if len(dparts) == 1:
+ depend_id = ".".join((module_name, class_name, dparts[0]))
+ elif len(dparts) == 2:
+ depend_id = ".".join((module_name, dparts[0], dparts[1]))
+ else:
+ depend_id = depend
+
+ if not case_id in registry:
+ registry[case_id] = []
+ if not depend_id in registry[case_id]:
+ registry[case_id].append(depend_id)
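+
+# For example (hypothetical names), inside test class 'mod.MyTest' the
+# following dependency strings are expanded by _add_depends as:
+#   'test_a'          -> 'mod.MyTest.test_a'  (same class)
+#   'Other.test_b'    -> 'mod.Other.test_b'   (same module)
+#   'mod2.X.test_c'   -> 'mod2.X.test_c'      (already fully qualified)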
+
+def _validate_test_case_depends(cases, depends):
+ for case in depends:
+ if not case in cases:
+ continue
+ for dep in depends[case]:
+ if not dep in cases:
+ raise OEQADependency("TestCase %s depends on %s and isn't available"\
+ ", cases available %s." % (case, dep, str(cases.keys())))
+
+def _order_test_case_by_depends(cases, depends):
+ def _dep_resolve(graph, node, resolved, seen):
+ seen.append(node)
+ for edge in graph[node]:
+ if edge not in resolved:
+ if edge in seen:
+ raise OEQADependency("Test cases %s and %s have a circular" \
+ " dependency." % (node, edge))
+ _dep_resolve(graph, edge, resolved, seen)
+ resolved.append(node)
+
+ dep_graph = {}
+ dep_graph['__root__'] = cases.keys()
+ for case in cases:
+ if case in depends:
+ dep_graph[case] = depends[case]
+ else:
+ dep_graph[case] = []
+
+ cases_ordered = []
+ _dep_resolve(dep_graph, '__root__', cases_ordered, [])
+ cases_ordered.remove('__root__')
+
+ return [cases[case_id] for case_id in cases_ordered]
+
+def _skipTestDependency(case, depends):
+ if isinstance(case.tc.runner, OETestRunnerThreaded):
+ import threading
+ results = case.tc._results[threading.get_ident()]
+ else:
+ results = case.tc._results
+
+ skipReasons = ['errors', 'failures', 'skipped']
+
+ for reason in skipReasons:
+ for test, _ in results[reason]:
+ if test.id() in depends:
+ raise SkipTest("Test case %s depends on %s and was in %s." \
+ % (case.id(), test.id(), reason))
+
+@registerDecorator
+class OETestDepends(OETestDiscover):
+ attrs = ('depends',)
+
+ def bind(self, registry, case):
+ super(OETestDepends, self).bind(registry, case)
+ if not registry.get('depends'):
+ registry['depends'] = {}
+ _add_depends(registry['depends'], case, self.depends)
+
+ @staticmethod
+ def discover(registry):
+ if registry.get('depends'):
+ _validate_test_case_depends(registry['cases'], registry['depends'])
+ return _order_test_case_by_depends(registry['cases'], registry['depends'])
+ else:
+ return [registry['cases'][case_id] for case_id in registry['cases']]
+
+ def setUpDecorator(self):
+ _skipTestDependency(self.case, self.depends)
diff --git a/poky/meta/lib/oeqa/core/decorator/oeid.py b/poky/meta/lib/oeqa/core/decorator/oeid.py
new file mode 100644
index 000000000..ea8017a55
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/decorator/oeid.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from . import OETestFilter, registerDecorator
+from oeqa.core.utils.misc import intToList
+
+def _idFilter(oeid, filters):
+    return oeid not in filters
+
+@registerDecorator
+class OETestID(OETestFilter):
+ attrs = ('oeid',)
+
+ def bind(self, registry, case):
+ super(OETestID, self).bind(registry, case)
+
+ def filtrate(self, filters):
+ if filters.get('oeid'):
+ filterx = intToList(filters['oeid'], 'oeid')
+ del filters['oeid']
+ if _idFilter(self.oeid, filterx):
+ return True
+ return False
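+
+# Usage sketch (the id value is hypothetical): tag a test case with an id
+# and filter on it when loading tests.
+#
+#   @OETestID(42)
+#   def test_something(self):
+#       ...
+#
+# Loading with filters={'oeid': 42} then keeps only the cases whose
+# OETestID matches one of the requested ids.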
diff --git a/poky/meta/lib/oeqa/core/decorator/oetag.py b/poky/meta/lib/oeqa/core/decorator/oetag.py
new file mode 100644
index 000000000..ad38ab78a
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/decorator/oetag.py
@@ -0,0 +1,24 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from . import OETestFilter, registerDecorator
+from oeqa.core.utils.misc import strToList
+
+def _tagFilter(tags, filters):
+    return not (set(tags) & set(filters))
+
+@registerDecorator
+class OETestTag(OETestFilter):
+ attrs = ('oetag',)
+
+ def bind(self, registry, case):
+ super(OETestTag, self).bind(registry, case)
+ self.oetag = strToList(self.oetag, 'oetag')
+
+ def filtrate(self, filters):
+ if filters.get('oetag'):
+ filterx = strToList(filters['oetag'], 'oetag')
+ del filters['oetag']
+ if _tagFilter(self.oetag, filterx):
+ return True
+ return False
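+
+# Usage sketch (the tag name is hypothetical):
+#
+#   @OETestTag('performance')
+#   def test_something(self):
+#       ...
+#
+# Loading with filters={'oetag': 'performance'} then keeps only the cases
+# tagged 'performance' (a case is kept if its tags intersect the filter).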
diff --git a/poky/meta/lib/oeqa/core/decorator/oetimeout.py b/poky/meta/lib/oeqa/core/decorator/oetimeout.py
new file mode 100644
index 000000000..f85e7d979
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/decorator/oetimeout.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from . import OETestDecorator, registerDecorator
+
+import signal
+from threading import Timer
+
+from oeqa.core.threaded import OETestRunnerThreaded
+from oeqa.core.exception import OEQATimeoutError
+
+@registerDecorator
+class OETimeout(OETestDecorator):
+ attrs = ('oetimeout',)
+
+ def setUpDecorator(self):
+ self.logger.debug("Setting up a %d second(s) timeout" % self.oetimeout)
+
+ if isinstance(self.case.tc.runner, OETestRunnerThreaded):
+ self.timeouted = False
+ def _timeoutHandler():
+ self.timeouted = True
+
+ self.timer = Timer(self.oetimeout, _timeoutHandler)
+ self.timer.start()
+ else:
+ timeout = self.oetimeout
+ def _timeoutHandler(signum, frame):
+ raise OEQATimeoutError("Timed out after %s "
+ "seconds of execution" % timeout)
+
+ self.alarmSignal = signal.signal(signal.SIGALRM, _timeoutHandler)
+ signal.alarm(self.oetimeout)
+
+ def tearDownDecorator(self):
+ if isinstance(self.case.tc.runner, OETestRunnerThreaded):
+ self.timer.cancel()
+ self.logger.debug("Removed Timer handler")
+ if self.timeouted:
+ raise OEQATimeoutError("Timed out after %s "
+ "seconds of execution" % self.oetimeout)
+ else:
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, self.alarmSignal)
+ self.logger.debug("Removed SIGALRM handler")
diff --git a/poky/meta/lib/oeqa/core/exception.py b/poky/meta/lib/oeqa/core/exception.py
new file mode 100644
index 000000000..732f2efde
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/exception.py
@@ -0,0 +1,23 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+class OEQAException(Exception):
+ pass
+
+class OEQATimeoutError(OEQAException):
+ pass
+
+class OEQAMissingVariable(OEQAException):
+ pass
+
+class OEQADependency(OEQAException):
+ pass
+
+class OEQAMissingManifest(OEQAException):
+ pass
+
+class OEQAPreRun(OEQAException):
+ pass
+
+class OEQATestNotFound(OEQAException):
+ pass
diff --git a/poky/meta/lib/oeqa/core/loader.py b/poky/meta/lib/oeqa/core/loader.py
new file mode 100644
index 000000000..a4744dee0
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/loader.py
@@ -0,0 +1,355 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import re
+import sys
+import unittest
+import inspect
+
+from oeqa.core.utils.path import findFile
+from oeqa.core.utils.test import getSuiteModules, getCaseID
+
+from oeqa.core.exception import OEQATestNotFound
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator import decoratorClasses, OETestDecorator, \
+ OETestFilter, OETestDiscover
+
+# When loading tests, the unittest framework stores any exceptions and
+# displays them only when the run method is called.
+#
+# For our purposes, it is better to raise the exceptions in the loading
+# step rather than waiting to run the test suite.
+#
+# Generate the function definition because it differs across Python versions:
+# Python >= 3.4.4 uses three parameters instead of four, but Python 3.5.3,
+# for example, uses four parameters again, so the change isn't monotonic.
+_failed_test_args = inspect.getargspec(unittest.loader._make_failed_test).args
+exec("""def _make_failed_test(%s): raise exception""" % ', '.join(_failed_test_args))
+unittest.loader._make_failed_test = _make_failed_test
+
+def _find_duplicated_modules(suite, directory):
+ for module in getSuiteModules(suite):
+ path = findFile('%s.py' % module, directory)
+ if path:
+ raise ImportError("Duplicated %s module found in %s" % (module, path))
+
+def _built_modules_dict(modules):
+ modules_dict = {}
+
+    if modules is None:
+ return modules_dict
+
+ for module in modules:
+ # Assumption: package and module names do not contain upper case
+ # characters, whereas class names do
+ m = re.match(r'^(\w+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII)
+
+ module_name, class_name, test_name = m.groups()
+
+ if module_name and module_name not in modules_dict:
+ modules_dict[module_name] = {}
+ if class_name and class_name not in modules_dict[module_name]:
+ modules_dict[module_name][class_name] = []
+ if test_name and test_name not in modules_dict[module_name][class_name]:
+ modules_dict[module_name][class_name].append(test_name)
+
+ return modules_dict
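+
+# For example (illustrative input), _built_modules_dict(
+#     ['mod', 'mod2.Class', 'mod3.Class.test_a'])
+# returns:
+#     {'mod': {}, 'mod2': {'Class': []}, 'mod3': {'Class': ['test_a']}}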
+
+class OETestLoader(unittest.TestLoader):
+ caseClass = OETestCase
+
+ kwargs_names = ['testMethodPrefix', 'sortTestMethodUsing', 'suiteClass',
+ '_top_level_dir']
+
+ def __init__(self, tc, module_paths, modules, tests, modules_required,
+ filters, *args, **kwargs):
+ self.tc = tc
+
+ self.modules = _built_modules_dict(modules)
+
+ self.tests = tests
+ self.modules_required = modules_required
+
+ self.filters = filters
+ self.decorator_filters = [d for d in decoratorClasses if \
+ issubclass(d, OETestFilter)]
+ self._validateFilters(self.filters, self.decorator_filters)
+ self.used_filters = [d for d in self.decorator_filters
+ for f in self.filters
+ if f in d.attrs]
+
+ if isinstance(module_paths, str):
+ module_paths = [module_paths]
+ elif not isinstance(module_paths, list):
+ raise TypeError('module_paths must be a str or a list of str')
+ self.module_paths = module_paths
+
+ for kwname in self.kwargs_names:
+ if kwname in kwargs:
+ setattr(self, kwname, kwargs[kwname])
+
+ self._patchCaseClass(self.caseClass)
+
+ super(OETestLoader, self).__init__()
+
+ def _patchCaseClass(self, testCaseClass):
+ # Adds custom attributes to the OETestCase class
+ setattr(testCaseClass, 'tc', self.tc)
+ setattr(testCaseClass, 'td', self.tc.td)
+ setattr(testCaseClass, 'logger', self.tc.logger)
+
+ def _validateFilters(self, filters, decorator_filters):
+        # Validate that no filter is empty
+ for key,value in filters.items():
+ if not value:
+ raise TypeError("Filter %s specified is empty" % key)
+
+ # Validate unique attributes
+ attr_filters = [attr for clss in decorator_filters \
+ for attr in clss.attrs]
+ dup_attr = [attr for attr in attr_filters
+ if attr_filters.count(attr) > 1]
+ if dup_attr:
+ raise TypeError('Detected duplicated attribute(s) %s in filter'
+                    ' decorators' % ', '.join(dup_attr))
+
+ # Validate if filter is supported
+ for f in filters:
+ if f not in attr_filters:
+ classes = ', '.join([d.__name__ for d in decorator_filters])
+ raise TypeError('Found "%s" filter but not declared in any of '
+ '%s decorators' % (f, classes))
+
+ def _registerTestCase(self, case):
+ case_id = case.id()
+ self.tc._registry['cases'][case_id] = case
+
+ def _handleTestCaseDecorators(self, case):
+ def _handle(obj):
+ if isinstance(obj, OETestDecorator):
+ if not obj.__class__ in decoratorClasses:
+ raise Exception("Decorator %s isn't registered" \
+ " in decoratorClasses." % obj.__name__)
+ obj.bind(self.tc._registry, case)
+
+ def _walk_closure(obj):
+ if hasattr(obj, '__closure__') and obj.__closure__:
+ for f in obj.__closure__:
+ obj = f.cell_contents
+ _handle(obj)
+ _walk_closure(obj)
+ method = getattr(case, case._testMethodName, None)
+ _walk_closure(method)
+
+ def _filterTest(self, case):
+ """
+ Returns True if test case must be filtered, False otherwise.
+ """
+        # XXX: If the module has more than one namespace, only use
+        # the first one, to support running the whole module by
+        # specifying <module_name>.[test_class].[test_name]
+ module_name_small = case.__module__.split('.')[0]
+ module_name = case.__module__
+
+ class_name = case.__class__.__name__
+ test_name = case._testMethodName
+
+ if self.modules:
+ module = None
+ try:
+ module = self.modules[module_name_small]
+ except KeyError:
+ try:
+ module = self.modules[module_name]
+ except KeyError:
+ return True
+
+ if module:
+ if not class_name in module:
+ return True
+
+ if module[class_name]:
+ if test_name not in module[class_name]:
+ return True
+
+ # Decorator filters
+ if self.filters and isinstance(case, OETestCase):
+ filters = self.filters.copy()
+ case_decorators = [cd for cd in case.decorators
+ if cd.__class__ in self.used_filters]
+
+ # Iterate over case decorators to check if needs to be filtered.
+ for cd in case_decorators:
+ if cd.filtrate(filters):
+ return True
+
+ # Case is missing one or more decorators for all the filters
+ # being used, so filter test case.
+ if filters:
+ return True
+
+ return False
+
+ def _getTestCase(self, testCaseClass, tcName):
+ if not hasattr(testCaseClass, '__oeqa_loader') and \
+ issubclass(testCaseClass, OETestCase):
+ # In order to support data_vars validation
+ # monkey patch the default setUp/tearDown{Class} to use
+ # the ones provided by OETestCase
+ setattr(testCaseClass, 'setUpClassMethod',
+ getattr(testCaseClass, 'setUpClass'))
+ setattr(testCaseClass, 'tearDownClassMethod',
+ getattr(testCaseClass, 'tearDownClass'))
+ setattr(testCaseClass, 'setUpClass',
+ testCaseClass._oeSetUpClass)
+ setattr(testCaseClass, 'tearDownClass',
+ testCaseClass._oeTearDownClass)
+
+            # In order to support decorator initialization,
+            # monkey patch the default setUp/tearDown to use
+            # _oeSetUp/_oeTearDown, which run the decorators and
+            # then call the original setUp/tearDown methods.
+ setattr(testCaseClass, 'setUpMethod',
+ getattr(testCaseClass, 'setUp'))
+ setattr(testCaseClass, 'tearDownMethod',
+ getattr(testCaseClass, 'tearDown'))
+ setattr(testCaseClass, 'setUp', testCaseClass._oeSetUp)
+ setattr(testCaseClass, 'tearDown', testCaseClass._oeTearDown)
+
+ setattr(testCaseClass, '__oeqa_loader', True)
+
+ case = testCaseClass(tcName)
+ if isinstance(case, OETestCase):
+ setattr(case, 'decorators', [])
+
+ return case
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """
+ Returns a suite of all tests cases contained in testCaseClass.
+ """
+ if issubclass(testCaseClass, unittest.suite.TestSuite):
+ raise TypeError("Test cases should not be derived from TestSuite." \
+ " Maybe you meant to derive %s from TestCase?" \
+ % testCaseClass.__name__)
+ if not issubclass(testCaseClass, unittest.case.TestCase):
+ raise TypeError("Test %s is not derived from %s" % \
+ (testCaseClass.__name__, unittest.case.TestCase.__name__))
+
+ testCaseNames = self.getTestCaseNames(testCaseClass)
+ if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+ testCaseNames = ['runTest']
+
+ suite = []
+ for tcName in testCaseNames:
+ case = self._getTestCase(testCaseClass, tcName)
+            # Filter by case id
+ if not (self.tests and not 'all' in self.tests
+ and not getCaseID(case) in self.tests):
+ self._handleTestCaseDecorators(case)
+
+ # Filter by decorators
+ if not self._filterTest(case):
+ self._registerTestCase(case)
+ suite.append(case)
+
+ return self.suiteClass(suite)
+
+ def _required_modules_validation(self):
+ """
+        Search the test context registry for each required
+        test and raise an exception when one is not found.
+ """
+
+ for module in self.modules_required:
+ found = False
+
+            # The module name is split so that only the
+            # first part of a test case id is compared.
+ comp_len = len(module.split('.'))
+ for case in self.tc._registry['cases']:
+ case_comp = '.'.join(case.split('.')[0:comp_len])
+ if module == case_comp:
+ found = True
+ break
+
+ if not found:
+ raise OEQATestNotFound("Not found %s in loaded test cases" % \
+ module)
+
+ def discover(self):
+ big_suite = self.suiteClass()
+ for path in self.module_paths:
+ _find_duplicated_modules(big_suite, path)
+ suite = super(OETestLoader, self).discover(path,
+ pattern='*.py', top_level_dir=path)
+ big_suite.addTests(suite)
+
+ cases = None
+ discover_classes = [clss for clss in decoratorClasses
+ if issubclass(clss, OETestDiscover)]
+ for clss in discover_classes:
+ cases = clss.discover(self.tc._registry)
+
+ if self.modules_required:
+ self._required_modules_validation()
+
+ return self.suiteClass(cases) if cases else big_suite
+
+ def _filterModule(self, module):
+ if module.__name__ in sys.builtin_module_names:
+            msg = 'Tried to import %s test module but it is a built-in'
+ raise ImportError(msg % module.__name__)
+
+        # XXX: If the module has more than one namespace, only use
+        # the first one, to support running the whole module by
+        # specifying <module_name>.[test_class].[test_name]
+ module_name_small = module.__name__.split('.')[0]
+ module_name = module.__name__
+
+        # Normal test modules are loaded if no modules were specified,
+        # if the module is in the specified module list, or if 'all' is
+        # in the module list.
+        # Underscore modules are loaded only if specified in the module list.
+ load_module = True if not module_name.startswith('_') \
+ and (not self.modules \
+ or module_name in self.modules \
+ or module_name_small in self.modules \
+ or 'all' in self.modules) \
+ else False
+
+ load_underscore = True if module_name.startswith('_') \
+ and (module_name in self.modules or \
+ module_name_small in self.modules) \
+ else False
+
+ return (load_module, load_underscore)
+
+
+ # XXX After Python 3.5, remove backward compatibility hacks for
+ # use_load_tests deprecation via *args and **kws. See issue 16662.
+ if sys.version_info >= (3,5):
+ def loadTestsFromModule(self, module, *args, pattern=None, **kws):
+ """
+ Returns a suite of all tests cases contained in module.
+ """
+ load_module, load_underscore = self._filterModule(module)
+
+ if load_module or load_underscore:
+ return super(OETestLoader, self).loadTestsFromModule(
+ module, *args, pattern=pattern, **kws)
+ else:
+ return self.suiteClass()
+ else:
+ def loadTestsFromModule(self, module, use_load_tests=True):
+ """
+ Returns a suite of all tests cases contained in module.
+ """
+ load_module, load_underscore = self._filterModule(module)
+
+ if load_module or load_underscore:
+ return super(OETestLoader, self).loadTestsFromModule(
+ module, use_load_tests)
+ else:
+ return self.suiteClass()
diff --git a/poky/meta/lib/oeqa/core/runner.py b/poky/meta/lib/oeqa/core/runner.py
new file mode 100644
index 000000000..13cdf5ba5
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/runner.py
@@ -0,0 +1,277 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import time
+import unittest
+import logging
+import re
+
+xmlEnabled = False
+try:
+ import xmlrunner
+ from xmlrunner.result import _XMLTestResult as _TestResult
+ from xmlrunner.runner import XMLTestRunner as _TestRunner
+ xmlEnabled = True
+except ImportError:
+ # use the base runner instead
+ from unittest import TextTestResult as _TestResult
+ from unittest import TextTestRunner as _TestRunner
+
+class OEStreamLogger(object):
+ def __init__(self, logger):
+ self.logger = logger
+ self.buffer = ""
+
+ def write(self, msg):
+ if len(msg) > 1 and msg[0] != '\n':
+ if '...' in msg:
+ self.buffer += msg
+ elif self.buffer:
+ self.buffer += msg
+ self.logger.log(logging.INFO, self.buffer)
+ self.buffer = ""
+ else:
+ self.logger.log(logging.INFO, msg)
+
+ def flush(self):
+ for handler in self.logger.handlers:
+ handler.flush()
+
+class OETestResult(_TestResult):
+ def __init__(self, tc, *args, **kwargs):
+ super(OETestResult, self).__init__(*args, **kwargs)
+
+ self.tc = tc
+ self._tc_map_results()
+
+ def startTest(self, test):
+        # Allow us to trigger the testcase buffer mode on a per-test basis
+        # so stdout/stderr are only printed upon failure. Enables debugging
+        # while keeping the output clean.
+ if hasattr(test, "buffer"):
+ self.buffer = test.buffer
+ super(OETestResult, self).startTest(test)
+
+ def _tc_map_results(self):
+ self.tc._results['failures'] = self.failures
+ self.tc._results['errors'] = self.errors
+ self.tc._results['skipped'] = self.skipped
+ self.tc._results['expectedFailures'] = self.expectedFailures
+
+ def logSummary(self, component, context_msg=''):
+ elapsed_time = self.tc._run_end_time - self.tc._run_start_time
+ self.tc.logger.info("SUMMARY:")
+ self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
+ context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
+ elapsed_time))
+
+ if self.wasSuccessful():
+ msg = "%s - OK - All required tests passed" % component
+ else:
+ msg = "%s - FAIL - Required tests failed" % component
+ skipped = len(self.tc._results['skipped'])
+ if skipped:
+ msg += " (skipped=%d)" % skipped
+ self.tc.logger.info(msg)
+
+ def _getDetailsNotPassed(self, case, type, desc):
+ found = False
+
+ for (scase, msg) in self.tc._results[type]:
+            # XXX: When XML reporting is enabled, scase is an
+            # xmlrunner.result._TestInfo instance instead of a
+            # string.
+ if xmlEnabled:
+ if case.id() == scase.test_id:
+ found = True
+ break
+ scase_str = scase.test_id
+ else:
+ if case == scase:
+ found = True
+ break
+ scase_str = str(scase)
+
+            # When the failure happens at module or class level, the class
+            # name is passed as a string, so check whether it matches this case
+ m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
+ if m:
+ if case.__class__.__module__ == m.group('module_name'):
+ found = True
+ break
+
+ m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
+ if m:
+ class_name = "%s.%s" % (case.__class__.__module__,
+ case.__class__.__name__)
+
+ if class_name == m.group('class_name'):
+ found = True
+ break
+
+ if found:
+ return (found, msg)
+
+ return (found, None)
+
+ def logDetails(self):
+ self.tc.logger.info("RESULTS:")
+ for case_name in self.tc._registry['cases']:
+ case = self.tc._registry['cases'][case_name]
+
+ result_types = ['failures', 'errors', 'skipped', 'expectedFailures']
+ result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL']
+
+ fail = False
+ desc = None
+ for idx, name in enumerate(result_types):
+ (fail, msg) = self._getDetailsNotPassed(case, result_types[idx],
+ result_desc[idx])
+ if fail:
+ desc = result_desc[idx]
+ break
+
+ oeid = -1
+ if hasattr(case, 'decorators'):
+ for d in case.decorators:
+ if hasattr(d, 'oeid'):
+ oeid = d.oeid
+
+ if fail:
+ self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
+ oeid, desc))
+ else:
+ self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(),
+ oeid, 'PASSED'))
+
+class OEListTestsResult(object):
+ def wasSuccessful(self):
+ return True
+
+class OETestRunner(_TestRunner):
+ streamLoggerClass = OEStreamLogger
+
+ def __init__(self, tc, *args, **kwargs):
+ if xmlEnabled:
+ if not kwargs.get('output'):
+ kwargs['output'] = os.path.join(os.getcwd(),
+ 'TestResults_%s_%s' % (time.strftime("%Y%m%d%H%M%S"), os.getpid()))
+
+ kwargs['stream'] = self.streamLoggerClass(tc.logger)
+ super(OETestRunner, self).__init__(*args, **kwargs)
+ self.tc = tc
+ self.resultclass = OETestResult
+
+    # XXX: The unittest-xml-reporting package defines a _make_result method
+    # instead of the standard unittest _makeResult.
+ if xmlEnabled:
+ def _make_result(self):
+ """
+ Creates a TestResult object which will be used to store
+ information about the executed tests.
+ """
+ # override in subclasses if necessary.
+ return self.resultclass(self.tc,
+ self.stream, self.descriptions, self.verbosity, self.elapsed_times
+ )
+ else:
+ def _makeResult(self):
+ return self.resultclass(self.tc, self.stream, self.descriptions,
+ self.verbosity)
+
+
+ def _walk_suite(self, suite, func):
+ for obj in suite:
+ if isinstance(obj, unittest.suite.TestSuite):
+ if len(obj._tests):
+ self._walk_suite(obj, func)
+ elif isinstance(obj, unittest.case.TestCase):
+ func(self.tc.logger, obj)
+ self._walked_cases = self._walked_cases + 1
+
+ def _list_tests_name(self, suite):
+ from oeqa.core.decorator.oeid import OETestID
+ from oeqa.core.decorator.oetag import OETestTag
+
+ self._walked_cases = 0
+
+ def _list_cases_without_id(logger, case):
+
+ found_id = False
+ if hasattr(case, 'decorators'):
+ for d in case.decorators:
+ if isinstance(d, OETestID):
+ found_id = True
+
+ if not found_id:
+ logger.info('oeid missing for %s' % case.id())
+
+ def _list_cases(logger, case):
+ oeid = None
+ oetag = None
+
+ if hasattr(case, 'decorators'):
+ for d in case.decorators:
+ if isinstance(d, OETestID):
+ oeid = d.oeid
+ elif isinstance(d, OETestTag):
+ oetag = d.oetag
+
+ logger.info("%s\t%s\t\t%s" % (oeid, oetag, case.id()))
+
+ self.tc.logger.info("Listing test cases that don't have oeid ...")
+ self._walk_suite(suite, _list_cases_without_id)
+ self.tc.logger.info("-" * 80)
+
+ self.tc.logger.info("Listing all available tests:")
+ self._walked_cases = 0
+ self.tc.logger.info("id\ttag\t\ttest")
+ self.tc.logger.info("-" * 80)
+ self._walk_suite(suite, _list_cases)
+ self.tc.logger.info("-" * 80)
+ self.tc.logger.info("Total found:\t%s" % self._walked_cases)
+
+ def _list_tests_class(self, suite):
+ self._walked_cases = 0
+
+ curr = {}
+ def _list_classes(logger, case):
+ if not 'module' in curr or curr['module'] != case.__module__:
+ curr['module'] = case.__module__
+ logger.info(curr['module'])
+
+ if not 'class' in curr or curr['class'] != \
+ case.__class__.__name__:
+ curr['class'] = case.__class__.__name__
+ logger.info(" -- %s" % curr['class'])
+
+ logger.info(" -- -- %s" % case._testMethodName)
+
+ self.tc.logger.info("Listing all available test classes:")
+ self._walk_suite(suite, _list_classes)
+
+ def _list_tests_module(self, suite):
+ self._walked_cases = 0
+
+ listed = []
+ def _list_modules(logger, case):
+ if not case.__module__ in listed:
+ if case.__module__.startswith('_'):
+ logger.info("%s (hidden)" % case.__module__)
+ else:
+ logger.info(case.__module__)
+ listed.append(case.__module__)
+
+ self.tc.logger.info("Listing all available test modules:")
+ self._walk_suite(suite, _list_modules)
+
+ def list_tests(self, suite, display_type):
+ if display_type == 'name':
+ self._list_tests_name(suite)
+ elif display_type == 'class':
+ self._list_tests_class(suite)
+ elif display_type == 'module':
+ self._list_tests_module(suite)
+
+ return OEListTestsResult()
diff --git a/poky/meta/lib/oeqa/core/target/__init__.py b/poky/meta/lib/oeqa/core/target/__init__.py
new file mode 100644
index 000000000..d2468bc25
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/target/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from abc import abstractmethod
+
+class OETarget(object):
+
+ def __init__(self, logger, *args, **kwargs):
+ self.logger = logger
+
+ @abstractmethod
+ def start(self):
+ pass
+
+ @abstractmethod
+ def stop(self):
+ pass
+
+ @abstractmethod
+ def run(self, cmd, timeout=None):
+ pass
+
+ @abstractmethod
+ def copyTo(self, localSrc, remoteDst):
+ pass
+
+ @abstractmethod
+ def copyFrom(self, remoteSrc, localDst):
+ pass
+
+ @abstractmethod
+ def copyDirTo(self, localSrc, remoteDst):
+ pass
diff --git a/poky/meta/lib/oeqa/core/target/qemu.py b/poky/meta/lib/oeqa/core/target/qemu.py
new file mode 100644
index 000000000..bf3b633f0
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/target/qemu.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import signal
+import time
+
+from .ssh import OESSHTarget
+from oeqa.utils.qemurunner import QemuRunner
+
+supported_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']
+
+class OEQemuTarget(OESSHTarget):
+ def __init__(self, logger, ip, server_ip, timeout=300, user='root',
+ port=None, machine='', rootfs='', kernel='', kvm=False,
+ dump_dir='', dump_host_cmds='', display='', bootlog='',
+ tmpdir='', dir_image='', boottime=60, **kwargs):
+
+ super(OEQemuTarget, self).__init__(logger, ip, server_ip, timeout,
+ user, port)
+
+ self.ip = ip
+ self.server_ip = server_ip
+ self.machine = machine
+ self.rootfs = rootfs
+ self.kernel = kernel
+ self.kvm = kvm
+
+ self.runner = QemuRunner(machine=machine, rootfs=rootfs, tmpdir=tmpdir,
+ deploy_dir_image=dir_image, display=display,
+ logfile=bootlog, boottime=boottime,
+ use_kvm=kvm, dump_dir=dump_dir,
+ dump_host_cmds=dump_host_cmds, logger=logger)
+
+ def start(self, params=None, extra_bootparams=None):
+ if self.runner.start(params, extra_bootparams=extra_bootparams):
+ self.ip = self.runner.ip
+ self.server_ip = self.runner.server_ip
+ else:
+ self.stop()
+ raise RuntimeError("FAILED to start qemu - check the task log and the boot log")
+
+ def stop(self):
+ self.runner.stop()
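+
+# Usage sketch (all paths and values are hypothetical):
+#
+#   target = OEQemuTarget(logger, '192.168.7.2', '192.168.7.1',
+#                         machine='qemux86',
+#                         rootfs='core-image-minimal-qemux86.ext4',
+#                         kernel='bzImage-qemux86.bin',
+#                         dir_image='tmp/deploy/images/qemux86')
+#   target.start()
+#   status, output = target.run('uname -m')
+#   target.stop()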
diff --git a/poky/meta/lib/oeqa/core/target/ssh.py b/poky/meta/lib/oeqa/core/target/ssh.py
new file mode 100644
index 000000000..151b99a77
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/target/ssh.py
@@ -0,0 +1,267 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import time
+import select
+import logging
+import subprocess
+import codecs
+
+from . import OETarget
+
+class OESSHTarget(OETarget):
+ def __init__(self, logger, ip, server_ip, timeout=300, user='root',
+ port=None, **kwargs):
+ if not logger:
+ logger = logging.getLogger('target')
+ logger.setLevel(logging.INFO)
+ filePath = os.path.join(os.getcwd(), 'remoteTarget.log')
+ fileHandler = logging.FileHandler(filePath, 'w', 'utf-8')
+ formatter = logging.Formatter(
+ '%(asctime)s.%(msecs)03d %(levelname)s: %(message)s',
+ '%H:%M:%S')
+ fileHandler.setFormatter(formatter)
+ logger.addHandler(fileHandler)
+
+ super(OESSHTarget, self).__init__(logger)
+ self.ip = ip
+ self.server_ip = server_ip
+ self.timeout = timeout
+ self.user = user
+ ssh_options = [
+ '-o', 'UserKnownHostsFile=/dev/null',
+ '-o', 'StrictHostKeyChecking=no',
+ '-o', 'LogLevel=ERROR'
+ ]
+ self.ssh = ['ssh', '-l', self.user ] + ssh_options
+ self.scp = ['scp'] + ssh_options
+ if port:
+ self.ssh = self.ssh + [ '-p', port ]
+ self.scp = self.scp + [ '-P', port ]
+
+ def start(self, **kwargs):
+ pass
+
+ def stop(self, **kwargs):
+ pass
+
+ def _run(self, command, timeout=None, ignore_status=True):
+ """
+        Runs command on the target using SSHCall.
+ """
+ self.logger.debug("[Running]$ %s" % " ".join(command))
+
+ starttime = time.time()
+ status, output = SSHCall(command, self.logger, timeout)
+ self.logger.debug("[Command returned '%d' after %.2f seconds]"
+ "" % (status, time.time() - starttime))
+
+ if status and not ignore_status:
+ raise AssertionError("Command '%s' returned non-zero exit "
+ "status %d:\n%s" % (command, status, output))
+
+ return (status, output)
+
+ def run(self, command, timeout=None):
+ """
+        Runs command on the target.
+
+ command: Command to run on target.
+ timeout: <value>: Kill command after <val> seconds.
+                 None: Kill command after the default timeout seconds.
+                 0: No timeout, runs until the command returns.
+ """
+ targetCmd = 'export PATH=/usr/sbin:/sbin:/usr/bin:/bin; %s' % command
+ sshCmd = self.ssh + [self.ip, targetCmd]
+
+ if timeout:
+ processTimeout = timeout
+ elif timeout==0:
+ processTimeout = None
+ else:
+ processTimeout = self.timeout
+
+ status, output = self._run(sshCmd, processTimeout, True)
+ self.logger.debug('Command: %s\nOutput: %s\n' % (command, output))
+ return (status, output)
+
+ def copyTo(self, localSrc, remoteDst):
+ """
+ Copy file to target.
+
+        If the local file is a symlink, recreate the symlink on the target.
+ """
+ if os.path.islink(localSrc):
+ link = os.readlink(localSrc)
+ dstDir, dstBase = os.path.split(remoteDst)
+ sshCmd = 'cd %s; ln -s %s %s' % (dstDir, link, dstBase)
+ return self.run(sshCmd)
+
+ else:
+ remotePath = '%s@%s:%s' % (self.user, self.ip, remoteDst)
+ scpCmd = self.scp + [localSrc, remotePath]
+ return self._run(scpCmd, ignore_status=False)
+
+ def copyFrom(self, remoteSrc, localDst):
+ """
+ Copy file from target.
+ """
+ remotePath = '%s@%s:%s' % (self.user, self.ip, remoteSrc)
+ scpCmd = self.scp + [remotePath, localDst]
+ return self._run(scpCmd, ignore_status=False)
+
+ def copyDirTo(self, localSrc, remoteDst):
+ """
+        Recursively copy the localSrc directory to remoteDst on the target.
+ """
+
+ for root, dirs, files in os.walk(localSrc):
+ # Create directories in the target as needed
+ for d in dirs:
+ tmpDir = os.path.join(root, d).replace(localSrc, "")
+ newDir = os.path.join(remoteDst, tmpDir.lstrip("/"))
+ cmd = "mkdir -p %s" % newDir
+ self.run(cmd)
+
+ # Copy files into the target
+ for f in files:
+ tmpFile = os.path.join(root, f).replace(localSrc, "")
+ dstFile = os.path.join(remoteDst, tmpFile.lstrip("/"))
+ srcFile = os.path.join(root, f)
+ self.copyTo(srcFile, dstFile)
+
+ def deleteFiles(self, remotePath, files):
+ """
+ Deletes files in target's remotePath.
+ """
+
+ cmd = "rm"
+ if not isinstance(files, list):
+ files = [files]
+
+ for f in files:
+ cmd = "%s %s" % (cmd, os.path.join(remotePath, f))
+
+ self.run(cmd)
+
+
+ def deleteDir(self, remotePath):
+ """
+ Deletes target's remotePath directory.
+ """
+
+ cmd = "rmdir %s" % remotePath
+ self.run(cmd)
+
+
+ def deleteDirStructure(self, localPath, remotePath):
+ """
+        Recursively delete the localPath directory structure under the
+        target's remotePath.
+
+        This function is very useful for deleting a package that is installed
+        in the DUT when the host running the test has the same package
+        extracted in a tmp directory.
+
+ Example:
+ pwd: /home/user/tmp
+ tree: .
+ └── work
+ ├── dir1
+ │   └── file1
+ └── dir2
+
+ localpath = "/home/user/tmp" and remotepath = "/home/user"
+
+ With the above variables this function will try to delete the
+ directory in the DUT in this order:
+ /home/user/work/dir1/file1
+ /home/user/work/dir1 (if dir is empty)
+ /home/user/work/dir2 (if dir is empty)
+ /home/user/work (if dir is empty)
+ """
+
+ for root, dirs, files in os.walk(localPath, topdown=False):
+ # Delete files first
+ tmpDir = os.path.join(root).replace(localPath, "")
+ remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
+ self.deleteFiles(remoteDir, files)
+
+ # Remove dirs if empty
+ for d in dirs:
+ tmpDir = os.path.join(root, d).replace(localPath, "")
+ remoteDir = os.path.join(remotePath, tmpDir.lstrip("/"))
+ self.deleteDir(remoteDir)
+
+def SSHCall(command, logger, timeout=None, **opts):
+
+ def run():
+ nonlocal output
+ nonlocal process
+ starttime = time.time()
+ process = subprocess.Popen(command, **options)
+ if timeout:
+ endtime = starttime + timeout
+ eof = False
+ while time.time() < endtime and not eof:
+ logger.debug('time: %s, endtime: %s' % (time.time(), endtime))
+ try:
+ if select.select([process.stdout], [], [], 5)[0] != []:
+ reader = codecs.getreader('utf-8')(process.stdout)
+ data = reader.read(1024, 1024)
+ if not data:
+ process.stdout.close()
+ eof = True
+ else:
+ output += data
+ logger.debug('Partial data from SSH call: %s' % data)
+ endtime = time.time() + timeout
+ except InterruptedError:
+ continue
+
+ # process hasn't returned yet
+ if not eof:
+ process.terminate()
+ time.sleep(5)
+ try:
+ process.kill()
+ except OSError:
+ pass
+ endtime = time.time() - starttime
+ lastline = ("\nProcess killed - no output for %d seconds. Total"
+ " running time: %d seconds." % (timeout, endtime))
+ logger.debug('Received data from SSH call %s ' % lastline)
+ output += lastline
+
+ else:
+ output = process.communicate()[0].decode("utf-8", errors='replace')
+ logger.debug('Data from SSH call: %s' % output.rstrip())
+
+ options = {
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.STDOUT,
+ "stdin": None,
+ "shell": False,
+ "bufsize": -1,
+ "preexec_fn": os.setsid,
+ }
+ options.update(opts)
+ output = ''
+ process = None
+
+ # Unset DISPLAY which means we won't trigger SSH_ASKPASS
+ env = os.environ.copy()
+ if "DISPLAY" in env:
+ del env['DISPLAY']
+ options['env'] = env
+
+ try:
+ run()
+ except:
+        # Need to guard against a SystemExit or other exception occurring
+ # whilst running and ensure we don't leave a process behind.
+ if process.poll() is None:
+ process.kill()
+ logger.debug('Something went wrong, killing SSH process')
+ raise
+ return (process.wait(), output.rstrip())
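+
+# Usage sketch (addresses are hypothetical): run a command on the target
+# and copy a file to it over scp.
+#
+#   target = OESSHTarget(None, '192.168.7.2', '192.168.7.1', timeout=120)
+#   status, output = target.run('uname -a')
+#   target.copyTo('/tmp/local-file', '/tmp/remote-file')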
diff --git a/poky/meta/lib/oeqa/core/tests/__init__.py b/poky/meta/lib/oeqa/core/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/__init__.py
diff --git a/poky/meta/lib/oeqa/core/tests/cases/data.py b/poky/meta/lib/oeqa/core/tests/cases/data.py
new file mode 100644
index 000000000..88003a6ad
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/data.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.oetag import OETestTag
+from oeqa.core.decorator.data import OETestDataDepends
+
+class DataTest(OETestCase):
+ data_vars = ['IMAGE', 'ARCH']
+
+ @OETestDataDepends(['MACHINE',])
+ @OETestTag('dataTestOk')
+ def testDataOk(self):
+ self.assertEqual(self.td.get('IMAGE'), 'core-image-minimal')
+ self.assertEqual(self.td.get('ARCH'), 'x86')
+ self.assertEqual(self.td.get('MACHINE'), 'qemuarm')
+
+ @OETestTag('dataTestFail')
+ def testDataFail(self):
+ pass
diff --git a/poky/meta/lib/oeqa/core/tests/cases/depends.py b/poky/meta/lib/oeqa/core/tests/cases/depends.py
new file mode 100644
index 000000000..17cdd90b1
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/depends.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+class DependsTest(OETestCase):
+
+ def testDependsFirst(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsFirst'])
+ def testDependsSecond(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsSecond'])
+ def testDependsThird(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsSecond'])
+ def testDependsFourth(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsThird', 'testDependsFourth'])
+ def testDependsFifth(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsCircular3'])
+ def testDependsCircular1(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsCircular1'])
+ def testDependsCircular2(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestDepends(['testDependsCircular2'])
+ def testDependsCircular3(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/invalid/oeid.py b/poky/meta/lib/oeqa/core/tests/cases/loader/invalid/oeid.py
new file mode 100644
index 000000000..038d44593
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/loader/invalid/oeid.py
@@ -0,0 +1,15 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+
+class AnotherIDTest(OETestCase):
+
+ def testAnotherIdGood(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ def testAnotherIdOther(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ def testAnotherIdNone(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py
new file mode 100644
index 000000000..0fe4cb3f1
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded.py
@@ -0,0 +1,12 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+
+class ThreadedTest(OETestCase):
+ def test_threaded_no_depends(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+class ThreadedTest2(OETestCase):
+ def test_threaded_same_module(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py
new file mode 100644
index 000000000..905f39784
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_alone.py
@@ -0,0 +1,8 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+
+class ThreadedTestAlone(OETestCase):
+ def test_threaded_alone(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py
new file mode 100644
index 000000000..0c158d3ba
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_depends.py
@@ -0,0 +1,10 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.depends import OETestDepends
+
+class ThreadedTest3(OETestCase):
+ @OETestDepends(['threaded.ThreadedTest.test_threaded_no_depends'])
+ def test_threaded_depends(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py
new file mode 100644
index 000000000..63d17e040
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/loader/threaded/threaded_module.py
@@ -0,0 +1,12 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+
+class ThreadedTestModule(OETestCase):
+ def test_threaded_module(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+class ThreadedTestModule2(OETestCase):
+ def test_threaded_module2(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/loader/valid/another.py b/poky/meta/lib/oeqa/core/tests/cases/loader/valid/another.py
new file mode 100644
index 000000000..c9ffd1777
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/loader/valid/another.py
@@ -0,0 +1,9 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+
+class AnotherTest(OETestCase):
+
+ def testAnother(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/oeid.py b/poky/meta/lib/oeqa/core/tests/cases/oeid.py
new file mode 100644
index 000000000..c2d3d32f2
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/oeid.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.oeid import OETestID
+
+class IDTest(OETestCase):
+
+ @OETestID(101)
+ def testIdGood(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestID(102)
+ def testIdOther(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ def testIdNone(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/oetag.py b/poky/meta/lib/oeqa/core/tests/cases/oetag.py
new file mode 100644
index 000000000..0cae02e75
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/oetag.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.oetag import OETestTag
+
+class TagTest(OETestCase):
+
+ @OETestTag('goodTag')
+ def testTagGood(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETestTag('otherTag')
+ def testTagOther(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ def testTagNone(self):
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/cases/timeout.py b/poky/meta/lib/oeqa/core/tests/cases/timeout.py
new file mode 100644
index 000000000..870c3157f
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/cases/timeout.py
@@ -0,0 +1,18 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from time import sleep
+
+from oeqa.core.case import OETestCase
+from oeqa.core.decorator.oetimeout import OETimeout
+
+class TimeoutTest(OETestCase):
+
+ @OETimeout(1)
+ def testTimeoutPass(self):
+ self.assertTrue(True, msg='How is this possible?')
+
+ @OETimeout(1)
+ def testTimeoutFail(self):
+ sleep(2)
+ self.assertTrue(True, msg='How is this possible?')
diff --git a/poky/meta/lib/oeqa/core/tests/common.py b/poky/meta/lib/oeqa/core/tests/common.py
new file mode 100644
index 000000000..193232340
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/common.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import sys
+import os
+
+import unittest
+import logging
+import os
+
+logger = logging.getLogger("oeqa")
+logger.setLevel(logging.INFO)
+consoleHandler = logging.StreamHandler()
+formatter = logging.Formatter('OEQATest: %(message)s')
+consoleHandler.setFormatter(formatter)
+logger.addHandler(consoleHandler)
+
+def setup_sys_path():
+ directory = os.path.dirname(os.path.abspath(__file__))
+ oeqa_lib = os.path.realpath(os.path.join(directory, '../../../'))
+ if not oeqa_lib in sys.path:
+ sys.path.insert(0, oeqa_lib)
+
+class TestBase(unittest.TestCase):
+ def setUp(self):
+ self.logger = logger
+ directory = os.path.dirname(os.path.abspath(__file__))
+ self.cases_path = os.path.join(directory, 'cases')
+
+ def _testLoader(self, d={}, modules=[], tests=[], filters={}):
+ from oeqa.core.context import OETestContext
+ tc = OETestContext(d, self.logger)
+ tc.loadTests(self.cases_path, modules=modules, tests=tests,
+ filters=filters)
+ return tc
+
+ def _testLoaderThreaded(self, d={}, modules=[],
+ tests=[], filters={}):
+ from oeqa.core.threaded import OETestContextThreaded
+
+ tc = OETestContextThreaded(d, self.logger)
+ tc.loadTests(self.cases_path, modules=modules, tests=tests,
+ filters=filters)
+
+ return tc
diff --git a/poky/meta/lib/oeqa/core/tests/test_data.py b/poky/meta/lib/oeqa/core/tests/test_data.py
new file mode 100755
index 000000000..320468cbe
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/test_data.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import unittest
+import logging
+import os
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.exception import OEQAMissingVariable
+from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames
+
+class TestData(TestBase):
+ modules = ['data']
+
+ def test_data_fail_missing_variable(self):
+ expectedException = "oeqa.core.exception.OEQAMissingVariable"
+
+ tc = self._testLoader(modules=self.modules)
+ self.assertEqual(False, tc.runTests().wasSuccessful())
+ for test, data in tc._results['errors']:
+ expect = False
+ if expectedException in data:
+ expect = True
+
+ self.assertTrue(expect)
+
+ def test_data_fail_wrong_variable(self):
+ expectedError = 'AssertionError'
+ d = {'IMAGE' : 'core-image-sato', 'ARCH' : 'arm'}
+
+ tc = self._testLoader(d=d, modules=self.modules)
+ self.assertEqual(False, tc.runTests().wasSuccessful())
+ for test, data in tc._results['failures']:
+ expect = False
+ if expectedError in data:
+ expect = True
+
+ self.assertTrue(expect)
+
+ def test_data_ok(self):
+ d = {'IMAGE' : 'core-image-minimal', 'ARCH' : 'x86', 'MACHINE' : 'qemuarm'}
+
+ tc = self._testLoader(d=d, modules=self.modules)
+ self.assertEqual(True, tc.runTests().wasSuccessful())
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/poky/meta/lib/oeqa/core/tests/test_decorators.py b/poky/meta/lib/oeqa/core/tests/test_decorators.py
new file mode 100755
index 000000000..cf99e0d72
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/test_decorators.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import signal
+import unittest
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.exception import OEQADependency
+from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames, getSuiteCasesIDs
+
+class TestFilterDecorator(TestBase):
+
+ def _runFilterTest(self, modules, filters, expect, msg):
+ tc = self._testLoader(modules=modules, filters=filters)
+ test_loaded = set(getSuiteCasesNames(tc.suites))
+ self.assertEqual(expect, test_loaded, msg=msg)
+
+ def test_oetag(self):
+ # Get all cases without filtering.
+ filter_all = {}
+ test_all = {'testTagGood', 'testTagOther', 'testTagNone'}
+ msg_all = 'Failed to get all oetag cases without filtering.'
+
+ # Get cases with 'goodTag'.
+ filter_good = {'oetag':'goodTag'}
+ test_good = {'testTagGood'}
+ msg_good = 'Failed to get just one test filtering with "goodTag" oetag.'
+
+ # Get cases with an invalid tag.
+ filter_invalid = {'oetag':'invalidTag'}
+ test_invalid = set()
+ msg_invalid = 'Failed to filter all test using an invalid oetag.'
+
+ tests = ((filter_all, test_all, msg_all),
+ (filter_good, test_good, msg_good),
+ (filter_invalid, test_invalid, msg_invalid))
+
+ for test in tests:
+ self._runFilterTest(['oetag'], test[0], test[1], test[2])
+
+ def test_oeid(self):
+ # Get all cases without filtering.
+ filter_all = {}
+ test_all = {'testIdGood', 'testIdOther', 'testIdNone'}
+ msg_all = 'Failed to get all oeid cases without filtering.'
+
+ # Get cases with '101' oeid.
+ filter_good = {'oeid': 101}
+ test_good = {'testIdGood'}
+        msg_good = 'Failed to get just one test filtering with "101" oeid.'
+
+ # Get cases with an invalid id.
+ filter_invalid = {'oeid':999}
+ test_invalid = set()
+ msg_invalid = 'Failed to filter all test using an invalid oeid.'
+
+ tests = ((filter_all, test_all, msg_all),
+ (filter_good, test_good, msg_good),
+ (filter_invalid, test_invalid, msg_invalid))
+
+ for test in tests:
+ self._runFilterTest(['oeid'], test[0], test[1], test[2])
+
+class TestDependsDecorator(TestBase):
+ modules = ['depends']
+
+ def test_depends_order(self):
+ tests = ['depends.DependsTest.testDependsFirst',
+ 'depends.DependsTest.testDependsSecond',
+ 'depends.DependsTest.testDependsThird',
+ 'depends.DependsTest.testDependsFourth',
+ 'depends.DependsTest.testDependsFifth']
+ tests2 = list(tests)
+ tests2[2], tests2[3] = tests[3], tests[2]
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ test_loaded = getSuiteCasesIDs(tc.suites)
+ result = True if test_loaded == tests or test_loaded == tests2 else False
+ msg = 'Failed to order tests using OETestDepends decorator.\nTest order:'\
+ ' %s.\nExpected: %s\nOr: %s' % (test_loaded, tests, tests2)
+ self.assertTrue(result, msg=msg)
+
+ def test_depends_fail_missing_dependency(self):
+ expect = "TestCase depends.DependsTest.testDependsSecond depends on "\
+ "depends.DependsTest.testDependsFirst and isn't available"
+ tests = ['depends.DependsTest.testDependsSecond']
+ try:
+            # Must throw OEQADependency because 'testDependsFirst' is missing
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.fail('Expected OEQADependency exception')
+ except OEQADependency as e:
+ result = True if expect in str(e) else False
+ msg = 'Expected OEQADependency exception missing testDependsFirst test'
+ self.assertTrue(result, msg=msg)
+
+ def test_depends_fail_circular_dependency(self):
+ expect = 'have a circular dependency'
+ tests = ['depends.DependsTest.testDependsCircular1',
+ 'depends.DependsTest.testDependsCircular2',
+ 'depends.DependsTest.testDependsCircular3']
+ try:
+            # Must throw OEQADependency because of the circular dependency
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.fail('Expected OEQADependency exception')
+ except OEQADependency as e:
+ result = True if expect in str(e) else False
+ msg = 'Expected OEQADependency exception having a circular dependency'
+ self.assertTrue(result, msg=msg)
+
+class TestTimeoutDecorator(TestBase):
+ modules = ['timeout']
+
+ def test_timeout(self):
+ tests = ['timeout.TimeoutTest.testTimeoutPass']
+ msg = 'Failed to run test using OETestTimeout'
+ alarm_signal = signal.getsignal(signal.SIGALRM)
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)
+ msg = "OETestTimeout didn't restore SIGALRM"
+ self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)
+
+ def test_timeout_fail(self):
+ tests = ['timeout.TimeoutTest.testTimeoutFail']
+ msg = "OETestTimeout test didn't timeout as expected"
+ alarm_signal = signal.getsignal(signal.SIGALRM)
+ tc = self._testLoader(modules=self.modules, tests=tests)
+ self.assertFalse(tc.runTests().wasSuccessful(), msg=msg)
+ msg = "OETestTimeout didn't restore SIGALRM"
+ self.assertIs(alarm_signal, signal.getsignal(signal.SIGALRM), msg=msg)
+
+ def test_timeout_thread(self):
+ tests = ['timeout.TimeoutTest.testTimeoutPass']
+ msg = 'Failed to run test using OETestTimeout'
+ tc = self._testLoaderThreaded(modules=self.modules, tests=tests)
+ self.assertTrue(tc.runTests().wasSuccessful(), msg=msg)
+
+ def test_timeout_threaded_fail(self):
+ tests = ['timeout.TimeoutTest.testTimeoutFail']
+ msg = "OETestTimeout test didn't timeout as expected"
+ tc = self._testLoaderThreaded(modules=self.modules, tests=tests)
+ self.assertFalse(tc.runTests().wasSuccessful(), msg=msg)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/poky/meta/lib/oeqa/core/tests/test_loader.py b/poky/meta/lib/oeqa/core/tests/test_loader.py
new file mode 100755
index 000000000..e0d917d31
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/test_loader.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2016-2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import unittest
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.exception import OEQADependency
+from oeqa.core.utils.test import getSuiteModules, getSuiteCasesIDs
+
+class TestLoader(TestBase):
+
+ def test_fail_empty_filter(self):
+ filters = {'oetag' : ''}
+ expect = 'Filter oetag specified is empty'
+ msg = 'Expected TypeError exception for the empty filter'
+ try:
+ # Must throw TypeError because the filter is empty
+ tc = self._testLoader(filters=filters)
+ self.fail(msg)
+ except TypeError as e:
+ result = expect in str(e)
+ self.assertTrue(result, msg=msg)
+
+ def test_fail_invalid_filter(self):
+ filters = {'invalid' : 'good'}
+ expect = 'filter but not declared in any of'
+ msg = 'Expected TypeError exception for the invalid filter'
+ try:
+ # Must throw TypeError because the filter is invalid
+ tc = self._testLoader(filters=filters)
+ self.fail(msg)
+ except TypeError as e:
+ result = expect in str(e)
+ self.assertTrue(result, msg=msg)
+
+ def test_fail_duplicated_module(self):
+ cases_path = self.cases_path
+ invalid_path = os.path.join(cases_path, 'loader', 'invalid')
+ self.cases_path = [self.cases_path, invalid_path]
+ expect = 'Duplicated oeid module found in'
+ msg = 'Expected ImportError exception for the duplicated module'
+ try:
+ # Must throw ImportError because of the duplicated module
+ tc = self._testLoader()
+ self.fail(msg)
+ except ImportError as e:
+ result = expect in str(e)
+ self.assertTrue(result, msg=msg)
+ finally:
+ self.cases_path = cases_path
+
+ def test_filter_modules(self):
+ expected_modules = {'oeid', 'oetag'}
+ tc = self._testLoader(modules=expected_modules)
+ modules = getSuiteModules(tc.suites)
+ msg = 'Expected just %s modules' % ', '.join(expected_modules)
+ self.assertEqual(modules, expected_modules, msg=msg)
+
+ def test_filter_cases(self):
+ modules = ['oeid', 'oetag', 'data']
+ expected_cases = {'data.DataTest.testDataOk',
+ 'oetag.TagTest.testTagGood',
+ 'oeid.IDTest.testIdGood'}
+ tc = self._testLoader(modules=modules, tests=expected_cases)
+ cases = set(getSuiteCasesIDs(tc.suites))
+ msg = 'Expected just %s cases' % ', '.join(expected_cases)
+ self.assertEqual(cases, expected_cases, msg=msg)
+
+ def test_import_from_paths(self):
+ cases_path = self.cases_path
+ cases2_path = os.path.join(cases_path, 'loader', 'valid')
+ expected_modules = {'oeid', 'another'}
+ self.cases_path = [self.cases_path, cases2_path]
+ tc = self._testLoader(modules=expected_modules)
+ modules = getSuiteModules(tc.suites)
+ self.cases_path = cases_path
+ msg = 'Expected modules from two different paths'
+ self.assertEqual(modules, expected_modules, msg=msg)
+
+ def test_loader_threaded(self):
+ cases_path = self.cases_path
+
+ self.cases_path = [os.path.join(self.cases_path, 'loader', 'threaded')]
+
+ tc = self._testLoaderThreaded()
+ self.assertEqual(len(tc.suites), 3, "Expected 3 suites")
+
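+ # The threaded loader must keep cases from the same module, and
+ # dependency chains, within a single suite; standalone modules get their own.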
+ case_ids = ['threaded.ThreadedTest.test_threaded_no_depends',
+ 'threaded.ThreadedTest2.test_threaded_same_module',
+ 'threaded_depends.ThreadedTest3.test_threaded_depends']
+ for case in tc.suites[0]._tests:
+ self.assertEqual(case.id(),
+ case_ids[tc.suites[0]._tests.index(case)])
+
+ case_ids = ['threaded_alone.ThreadedTestAlone.test_threaded_alone']
+ for case in tc.suites[1]._tests:
+ self.assertEqual(case.id(),
+ case_ids[tc.suites[1]._tests.index(case)])
+
+ case_ids = ['threaded_module.ThreadedTestModule.test_threaded_module',
+ 'threaded_module.ThreadedTestModule2.test_threaded_module2']
+ for case in tc.suites[2]._tests:
+ self.assertEqual(case.id(),
+ case_ids[tc.suites[2]._tests.index(case)])
+
+ self.cases_path = cases_path
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/poky/meta/lib/oeqa/core/tests/test_runner.py b/poky/meta/lib/oeqa/core/tests/test_runner.py
new file mode 100755
index 000000000..a3f3861fe
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/tests/test_runner.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import unittest
+import logging
+import tempfile
+
+from common import setup_sys_path, TestBase
+setup_sys_path()
+
+from oeqa.core.runner import OEStreamLogger
+
+class TestRunner(TestBase):
+ def test_stream_logger(self):
+ fp = tempfile.TemporaryFile(mode='w+')
+
+ logging.basicConfig(format='%(message)s', stream=fp)
+ logger = logging.getLogger()
+ logger.setLevel(logging.INFO)
+
+ oeSL = OEStreamLogger(logger)
+
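+ # Mix short lines with lines far beyond 64K to check that buffered
+ # writes are reassembled into the same lines on the way out.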
+ lines = ['init', 'bigline_' * 65535, 'morebigline_' * 65535 * 4, 'end']
+ for line in lines:
+ oeSL.write(line)
+
+ fp.seek(0)
+ fp_lines = fp.readlines()
+ for i, fp_line in enumerate(fp_lines):
+ fp_line = fp_line.strip()
+ self.assertEqual(lines[i], fp_line)
+
+ fp.close()
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/poky/meta/lib/oeqa/core/threaded.py b/poky/meta/lib/oeqa/core/threaded.py
new file mode 100644
index 000000000..2cafe03a2
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/threaded.py
@@ -0,0 +1,284 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import threading
+import multiprocessing
+import queue
+import time
+
+from unittest.suite import TestSuite
+
+from oeqa.core.loader import OETestLoader
+from oeqa.core.runner import OEStreamLogger, OETestResult, OETestRunner
+from oeqa.core.context import OETestContext
+
+class OETestLoaderThreaded(OETestLoader):
+ def __init__(self, tc, module_paths, modules, tests, modules_required,
+ filters, process_num=0, *args, **kwargs):
+ super(OETestLoaderThreaded, self).__init__(tc, module_paths, modules,
+ tests, modules_required, filters, *args, **kwargs)
+
+ self.process_num = process_num
+
+ def discover(self):
+ suite = super(OETestLoaderThreaded, self).discover()
+
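+ # Default the worker count to the smaller of the host CPU count
+ # and the number of discovered cases.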
+ if self.process_num <= 0:
+ self.process_num = min(multiprocessing.cpu_count(),
+ len(suite._tests))
+
+ suites = []
+ for _ in range(self.process_num):
+ suites.append(self.suiteClass())
+
+ def _search_for_module_idx(suites, case):
+ """
+ Cases from the same module need to run in the same
+ thread, because PyUnit keeps per-module and per-class
+ state via setUp/tearDown{Module, Class}.
+ """
+
+ for idx in range(self.process_num):
+ suite = suites[idx]
+ for c in suite._tests:
+ if case.__module__ == c.__module__:
+ return idx
+
+ return -1
+
+ def _search_for_depend_idx(suites, depends):
+ """
+ Dependent cases need to run in the same thread,
+ because the OEQA framework inspects the state of the
+ depended-on test to decide whether to skip.
+ """
+
+ for idx in range(self.process_num):
+ suite = suites[idx]
+
+ for case in suite._tests:
+ if case.id() in depends:
+ return idx
+ return -1
+
+ def _get_best_idx(suites):
+ sizes = [len(suite._tests) for suite in suites]
+ return sizes.index(min(sizes))
+
+ def _fill_suites(suite):
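+ # Place each case with its module's suite, else with its
+ # dependencies' suite, else into the currently smallest suite.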
+ idx = -1
+ for case in suite:
+ if isinstance(case, TestSuite):
+ _fill_suites(case)
+ else:
+ idx = _search_for_module_idx(suites, case)
+
+ depends = {}
+ if 'depends' in self.tc._registry:
+ depends = self.tc._registry['depends']
+
+ if idx == -1 and case.id() in depends:
+ case_depends = depends[case.id()]
+ idx = _search_for_depend_idx(suites, case_depends)
+
+ if idx == -1:
+ idx = _get_best_idx(suites)
+
+ suites[idx].addTest(case)
+ _fill_suites(suite)
+
+ suites_tmp = suites
+ suites = []
+ for suite in suites_tmp:
+ if len(suite._tests) > 0:
+ suites.append(suite)
+
+ return suites
+
+class OEStreamLoggerThreaded(OEStreamLogger):
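+ # Buffers log output per thread and flushes it as a single block under
+ # a lock, so output from concurrent suites does not interleave.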
+ _lock = threading.Lock()
+ buffers = {}
+
+ def write(self, msg):
+ tid = threading.get_ident()
+
+ if tid not in self.buffers:
+ self.buffers[tid] = ""
+
+ if msg:
+ self.buffers[tid] += msg
+
+ def finish(self):
+ tid = threading.get_ident()
+
+ self._lock.acquire()
+ self.logger.info('THREAD: %d' % tid)
+ self.logger.info('-' * 70)
+ for line in self.buffers[tid].split('\n'):
+ self.logger.info(line)
+ self._lock.release()
+
+class OETestResultThreadedInternal(OETestResult):
+ def _tc_map_results(self):
+ tid = threading.get_ident()
+
+ # PyUnit generates a result for every test module run; check
+ # whether this thread already has an entry so the previous
+ # module's results are not lost.
+ if tid not in self.tc._results:
+ self.tc._results[tid] = {}
+ self.tc._results[tid]['failures'] = self.failures
+ self.tc._results[tid]['errors'] = self.errors
+ self.tc._results[tid]['skipped'] = self.skipped
+ self.tc._results[tid]['expectedFailures'] = self.expectedFailures
+
+class OETestResultThreaded(object):
+ _results = {}
+ _lock = threading.Lock()
+
+ def __init__(self, tc):
+ self.tc = tc
+
+ def _fill_tc_results(self):
+ tids = list(self.tc._results.keys())
+ fields = ['failures', 'errors', 'skipped', 'expectedFailures']
+
+ for tid in tids:
+ result = self.tc._results[tid]
+ for field in fields:
+ if field not in self.tc._results:
+ self.tc._results[field] = []
+ self.tc._results[field].extend(result[field])
+
+ def addResult(self, result, run_start_time, run_end_time):
+ tid = threading.get_ident()
+
+ self._lock.acquire()
+ self._results[tid] = {}
+ self._results[tid]['result'] = result
+ self._results[tid]['run_start_time'] = run_start_time
+ self._results[tid]['run_end_time'] = run_end_time
+ self._lock.release()
+
+ def wasSuccessful(self):
+ wasSuccessful = True
+ for tid in self._results.keys():
+ wasSuccessful = wasSuccessful and \
+ self._results[tid]['result'].wasSuccessful()
+ return wasSuccessful
+
+ def stop(self):
+ for tid in self._results.keys():
+ self._results[tid]['result'].stop()
+
+ def logSummary(self, component, context_msg=''):
+ elapsed_time = (self.tc._run_end_time - self.tc._run_start_time)
+
+ self.tc.logger.info("SUMMARY:")
+ self.tc.logger.info("%s (%s) - Ran %d tests in %.3fs" % (component,
+ context_msg, len(self.tc._registry['cases']), elapsed_time))
+ if self.wasSuccessful():
+ msg = "%s - OK - All required tests passed" % component
+ else:
+ msg = "%s - FAIL - Required tests failed" % component
+ self.tc.logger.info(msg)
+
+ def logDetails(self):
+ if list(self._results):
+ tid = list(self._results)[0]
+ result = self._results[tid]['result']
+ result.logDetails()
+
+class _Worker(threading.Thread):
+ """Thread executing tasks from a given tasks queue"""
+ def __init__(self, tasks, result, stream):
+ threading.Thread.__init__(self)
+ self.tasks = tasks
+
+ self.result = result
+ self.stream = stream
+
+ def run(self):
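+ # Consume tasks until the queue is empty; a non-blocking get()
+ # raising queue.Empty is the signal to end this worker thread.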
+ while True:
+ try:
+ func, args, kargs = self.tasks.get(block=False)
+ except queue.Empty:
+ break
+
+ try:
+ run_start_time = time.time()
+ rc = func(*args, **kargs)
+ run_end_time = time.time()
+ self.result.addResult(rc, run_start_time, run_end_time)
+ self.stream.finish()
+ except Exception as e:
+ print(e)
+ finally:
+ self.tasks.task_done()
+
+class _ThreadedPool:
+ """Pool of threads consuming tasks from a queue"""
+ def __init__(self, num_workers, num_tasks, stream=None, result=None):
+ self.tasks = queue.Queue(num_tasks)
+ self.workers = []
+
+ for _ in range(num_workers):
+ worker = _Worker(self.tasks, result, stream)
+ self.workers.append(worker)
+
+ def start(self):
+ for worker in self.workers:
+ worker.start()
+
+ def add_task(self, func, *args, **kargs):
+ """Add a task to the queue"""
+ self.tasks.put((func, args, kargs))
+
+ def wait_completion(self):
+ """Wait for completion of all the tasks in the queue"""
+ self.tasks.join()
+ for worker in self.workers:
+ worker.join()
+
+class OETestRunnerThreaded(OETestRunner):
+ streamLoggerClass = OEStreamLoggerThreaded
+
+ def __init__(self, tc, *args, **kwargs):
+ super(OETestRunnerThreaded, self).__init__(tc, *args, **kwargs)
+ self.resultclass = OETestResultThreadedInternal # XXX: XML reporting overrides at __init__
+
+ def run(self, suites):
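+ # One worker per suite: queue all suites, run them concurrently,
+ # then merge the per-thread results into the test context.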
+ result = OETestResultThreaded(self.tc)
+
+ pool = _ThreadedPool(len(suites), len(suites), stream=self.stream,
+ result=result)
+ for s in suites:
+ pool.add_task(super(OETestRunnerThreaded, self).run, s)
+ pool.start()
+ pool.wait_completion()
+ result._fill_tc_results()
+
+ return result
+
+class OETestContextThreaded(OETestContext):
+ loaderClass = OETestLoaderThreaded
+ runnerClass = OETestRunnerThreaded
+
+ def loadTests(self, module_paths, modules=[], tests=[],
+ modules_manifest="", modules_required=[], filters={}, process_num=0):
+ if modules_manifest:
+ modules = self._read_modules_from_manifest(modules_manifest)
+
+ self.loader = self.loaderClass(self, module_paths, modules, tests,
+ modules_required, filters, process_num)
+ self.suites = self.loader.discover()
diff --git a/poky/meta/lib/oeqa/core/utils/__init__.py b/poky/meta/lib/oeqa/core/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/utils/__init__.py
diff --git a/poky/meta/lib/oeqa/core/utils/misc.py b/poky/meta/lib/oeqa/core/utils/misc.py
new file mode 100644
index 000000000..0b223b5d0
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/utils/misc.py
@@ -0,0 +1,44 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+def toList(obj, obj_type, obj_name="Object"):
+ if isinstance(obj, obj_type):
+ return [obj]
+ elif isinstance(obj, list):
+ return obj
+ else:
+ raise TypeError("%s must be %s or list" % (obj_name, obj_type))
+
+def toSet(obj, obj_type, obj_name="Object"):
+ if isinstance(obj, obj_type):
+ return {obj}
+ elif isinstance(obj, list):
+ return set(obj)
+ elif isinstance(obj, set):
+ return obj
+ else:
+ raise TypeError("%s must be %s or set" % (obj_name, obj_type))
+
+def strToList(obj, obj_name="Object"):
+ return toList(obj, str, obj_name)
+
+def strToSet(obj, obj_name="Object"):
+ return toSet(obj, str, obj_name)
+
+def intToList(obj, obj_name="Object"):
+ return toList(obj, int, obj_name)
+
+def dataStoteToDict(d, variables):
+ data = {}
+
+ for v in variables:
+ data[v] = d.getVar(v)
+
+ return data
+
+def updateTestData(d, td, variables):
+ """
+ Updates the test data dict with variable values from the data store.
+ """
+ for var in variables:
+ td[var] = d.getVar(var)
diff --git a/poky/meta/lib/oeqa/core/utils/path.py b/poky/meta/lib/oeqa/core/utils/path.py
new file mode 100644
index 000000000..a21caad5c
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/utils/path.py
@@ -0,0 +1,19 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+
+def findFile(file_name, directory):
+ """
+ Searches for file_name under directory and returns its full path, or None.
+ """
+ for r, d, f in os.walk(directory):
+ if file_name in f:
+ return os.path.join(r, file_name)
+ return None
+
+def remove_safe(path):
+ if os.path.exists(path):
+ os.remove(path)
+
diff --git a/poky/meta/lib/oeqa/core/utils/test.py b/poky/meta/lib/oeqa/core/utils/test.py
new file mode 100644
index 000000000..88d5d1398
--- /dev/null
+++ b/poky/meta/lib/oeqa/core/utils/test.py
@@ -0,0 +1,86 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import inspect
+import unittest
+
+def getSuiteCases(suite):
+ """
+ Returns the individual test cases from a test suite.
+ """
+ tests = []
+
+ if isinstance(suite, unittest.TestCase):
+ tests.append(suite)
+ elif isinstance(suite, unittest.suite.TestSuite):
+ for item in suite:
+ tests.extend(getSuiteCases(item))
+
+ return tests
+
+def getSuiteModules(suite):
+ """
+ Returns modules in a test suite.
+ """
+ modules = set()
+ for test in getSuiteCases(suite):
+ modules.add(getCaseModule(test))
+ return modules
+
+def getSuiteCasesInfo(suite, func):
+ """
+ Returns test case info from suite. Info is fetched from func.
+ """
+ tests = []
+ for test in getSuiteCases(suite):
+ tests.append(func(test))
+ return tests
+
+def getSuiteCasesNames(suite):
+ """
+ Returns test case names from suite.
+ """
+ return getSuiteCasesInfo(suite, getCaseMethod)
+
+def getSuiteCasesIDs(suite):
+ """
+ Returns test case ids from suite.
+ """
+ return getSuiteCasesInfo(suite, getCaseID)
+
+def getSuiteCasesFiles(suite):
+ """
+ Returns test case files paths from suite.
+ """
+ return getSuiteCasesInfo(suite, getCaseFile)
+
+def getCaseModule(test_case):
+ """
+ Returns test case module name.
+ """
+ return test_case.__module__
+
+def getCaseClass(test_case):
+ """
+ Returns test case class name.
+ """
+ return test_case.__class__.__name__
+
+def getCaseID(test_case):
+ """
+ Returns test case complete id.
+ """
+ return test_case.id()
+
+def getCaseFile(test_case):
+ """
+ Returns test case file path.
+ """
+ return inspect.getsourcefile(test_case.__class__)
+
+def getCaseMethod(test_case):
+ """
+ Returns test case method name.
+ """
+ return getCaseID(test_case).split('.')[-1]
diff --git a/poky/meta/lib/oeqa/files/test.c b/poky/meta/lib/oeqa/files/test.c
new file mode 100644
index 000000000..2d8389c92
--- /dev/null
+++ b/poky/meta/lib/oeqa/files/test.c
@@ -0,0 +1,26 @@
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+
+double convert(long long l)
+{
+ return (double)l;
+}
+
+int main(int argc, char * argv[]) {
+
+ long long l = 10;
+ double f;
+ double check = 10.0;
+
+ f = convert(l);
+ printf("convert: %lld => %f\n", l, f);
+ if ( f != check ) exit(1);
+
+ f = 1234.67;
+ check = 1234.0;
+ printf("floorf(%f) = %f\n", f, floorf(f));
+ if ( floorf(f) != check) exit(1);
+
+ return 0;
+}
diff --git a/poky/meta/lib/oeqa/files/test.cpp b/poky/meta/lib/oeqa/files/test.cpp
new file mode 100644
index 000000000..9e1a76473
--- /dev/null
+++ b/poky/meta/lib/oeqa/files/test.cpp
@@ -0,0 +1,3 @@
+#include <limits>
+
+int main() {} \ No newline at end of file
diff --git a/poky/meta/lib/oeqa/files/test.pl b/poky/meta/lib/oeqa/files/test.pl
new file mode 100644
index 000000000..689c8f163
--- /dev/null
+++ b/poky/meta/lib/oeqa/files/test.pl
@@ -0,0 +1,2 @@
+$a = 9.01e+21 - 9.01e+21 + 0.01;
+print ("the value of a is ", $a, "\n");
diff --git a/poky/meta/lib/oeqa/files/test.py b/poky/meta/lib/oeqa/files/test.py
new file mode 100644
index 000000000..f389225d7
--- /dev/null
+++ b/poky/meta/lib/oeqa/files/test.py
@@ -0,0 +1,6 @@
+import os
+
+os.system('touch /tmp/testfile.python')
+
+a = 9.01e+21 - 9.01e+21 + 0.01
+print("the value of a is %s" % a)
diff --git a/poky/meta/lib/oeqa/oetest.py b/poky/meta/lib/oeqa/oetest.py
new file mode 100644
index 000000000..f7171260e
--- /dev/null
+++ b/poky/meta/lib/oeqa/oetest.py
@@ -0,0 +1,620 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Main unittest module used by testimage.bbclass
+# This provides the oeRuntimeTest base class which is inherited by all tests in meta/lib/oeqa/runtime.
+
+# It also has some helper functions and is responsible for actually starting the tests.
+
+import os, re, mmap, sys
+import unittest
+import inspect
+import subprocess
+import signal
+import shutil
+import functools
+try:
+ import bb
+except ImportError:
+ pass
+import logging
+
+import oeqa.runtime
+# Exported test doesn't require sdkext
+try:
+ import oeqa.sdkext
+except ImportError:
+ pass
+from oeqa.utils.decorators import LogResults, gettag, getResults
+
+logger = logging.getLogger("BitBake")
+
+def getVar(obj):
+ # Extends dict: if a variable doesn't exist, look it up on the test case.
+ class VarDict(dict):
+ def __getitem__(self, key):
+ return gettag(obj, key)
+ return VarDict()
+
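+# checkTags evaluates the tag expression (TEST_SUITES_TAGS or the exported
+# --tag argument) with tag names resolved against the test case via gettag().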
+def checkTags(tc, tagexp):
+ return eval(tagexp, None, getVar(tc))
+
+def filterByTagExp(testsuite, tagexp):
+ if not tagexp:
+ return testsuite
+ caseList = []
+ for each in testsuite:
+ if not isinstance(each, unittest.BaseTestSuite):
+ if checkTags(each, tagexp):
+ caseList.append(each)
+ else:
+ caseList.append(filterByTagExp(each, tagexp))
+ return testsuite.__class__(caseList)
+
+@LogResults
+class oeTest(unittest.TestCase):
+
+ pscmd = "ps"
+ longMessage = True
+
+ @classmethod
+ def hasPackage(cls, pkg):
+ """
+ True if the full package name exists in the manifest, False otherwise.
+ """
+ return pkg in oeTest.tc.pkgmanifest
+
+ @classmethod
+ def hasPackageMatch(cls, match):
+ """
+ True if the regular expression 'match' matches the start of a
+ package name in the manifest, False otherwise.
+ """
+ for s in oeTest.tc.pkgmanifest:
+ if re.match(match, s):
+ return True
+ return False
+
+ @classmethod
+ def hasFeature(cls, feature):
+ if feature in oeTest.tc.imagefeatures or \
+ feature in oeTest.tc.distrofeatures:
+ return True
+ else:
+ return False
+
+class oeRuntimeTest(oeTest):
+ def __init__(self, methodName='runTest'):
+ self.target = oeRuntimeTest.tc.target
+ super(oeRuntimeTest, self).__init__(methodName)
+
+ def setUp(self):
+ # Install packages in the DUT
+ self.tc.install_uninstall_packages(self.id())
+
+ # Check if test needs to run
+ if self.tc.sigterm:
+ self.fail("Got SIGTERM")
+ elif (type(self.target).__name__ == "QemuTarget"):
+ self.assertTrue(self.target.check(), msg = "Qemu not running?")
+
+ self.setUpLocal()
+
+ # a setup method before tests but after the class instantiation
+ def setUpLocal(self):
+ pass
+
+ def tearDown(self):
+ # Uninstall packages in the DUT
+ self.tc.install_uninstall_packages(self.id(), False)
+
+ res = getResults()
+ # If a test fails or there is an exception dump
+ # for QemuTarget only
+ if (type(self.target).__name__ == "QemuTarget" and
+ (self.id() in res.getErrorList() or
+ self.id() in res.getFailList())):
+ self.tc.host_dumper.create_dir(self._testMethodName)
+ self.tc.host_dumper.dump_host()
+ self.target.target_dumper.dump_target(
+ self.tc.host_dumper.dump_dir)
+ print ("%s dump data stored in %s" % (self._testMethodName,
+ self.tc.host_dumper.dump_dir))
+
+ self.tearDownLocal()
+
+ # Method to be run after tearDown and implemented by child classes
+ def tearDownLocal(self):
+ pass
+
+def getmodule(pos=2):
+ # inspect.stack() returns a list of tuples containing frame information.
+ # The first element is the current frame; the caller is at index 1.
+ frameinfo = inspect.stack()[pos]
+ modname = inspect.getmodulename(frameinfo[1])
+ #modname = inspect.getmodule(frameinfo[0]).__name__
+ return modname
+
+def skipModule(reason, pos=2):
+ modname = getmodule(pos)
+ if modname not in oeTest.tc.testsrequired:
+ raise unittest.SkipTest("%s: %s" % (modname, reason))
+ else:
+ raise Exception("\nTest %s wants to be skipped.\nReason is: %s" \
+ "\nTest was required in TEST_SUITES, so either the condition for skipping is wrong" \
+ "\nor the image really doesn't have the required feature/package when it should." % (modname, reason))
+
+def skipModuleIf(cond, reason):
+
+ if cond:
+ skipModule(reason, 3)
+
+def skipModuleUnless(cond, reason):
+
+ if not cond:
+ skipModule(reason, 3)
+
+_buffer_logger = ""
+def custom_verbose(msg, *args, **kwargs):
+ global _buffer_logger
+ # Buffer partial writes; emit the accumulated line once a newline
+ # arrives (bb is undefined outside bitbake, hence the fallback).
+ _buffer_logger += msg
+ if msg.endswith("\n"):
+ try:
+ bb.plain(_buffer_logger.rstrip("\n"), *args, **kwargs)
+ except NameError:
+ logger.info(_buffer_logger.rstrip("\n"), *args, **kwargs)
+ _buffer_logger = ""
+
+class TestContext(object):
+ def __init__(self, d, exported=False):
+ self.d = d
+
+ self.testsuites = self._get_test_suites()
+
+ if exported:
+ path = [os.path.dirname(os.path.abspath(__file__))]
+ extrapath = ""
+ else:
+ path = d.getVar("BBPATH").split(':')
+ extrapath = "lib/oeqa"
+
+ self.testslist = self._get_tests_list(path, extrapath)
+ self.testsrequired = self._get_test_suites_required()
+
+ self.filesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runtime/files")
+ self.corefilesdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
+ self.imagefeatures = d.getVar("IMAGE_FEATURES").split()
+ self.distrofeatures = d.getVar("DISTRO_FEATURES").split()
+
+ # get testcase list from specified file
+ # if path is a relative path, then relative to build/conf/
+ def _read_testlist(self, fpath, builddir):
+ if not os.path.isabs(fpath):
+ fpath = os.path.join(builddir, "conf", fpath)
+ if not os.path.exists(fpath):
+ bb.fatal("No such manifest file: ", fpath)
+ tcs = []
+ for line in open(fpath).readlines():
+ line = line.strip()
+ if line and not line.startswith("#"):
+ tcs.append(line)
+ return " ".join(tcs)
+
+ # return test list by type also filter if TEST_SUITES is specified
+ def _get_tests_list(self, bbpath, extrapath):
+ testslist = []
+
+ type = self._get_test_namespace()
+
+ # This relies on lib/ under each directory in BBPATH being added to sys.path
+ # (as done by default in base.bbclass)
+ for testname in self.testsuites:
+ if testname != "auto":
+ if testname.startswith("oeqa."):
+ testslist.append(testname)
+ continue
+ found = False
+ for p in bbpath:
+ if os.path.exists(os.path.join(p, extrapath, type, testname + ".py")):
+ testslist.append("oeqa." + type + "." + testname)
+ found = True
+ break
+ elif os.path.exists(os.path.join(p, extrapath, type, testname.split(".")[0] + ".py")):
+ testslist.append("oeqa." + type + "." + testname)
+ found = True
+ break
+ if not found:
+ bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
+
+ if "auto" in self.testsuites:
+ def add_auto_list(path):
+ files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
+ for f in files:
+ module = 'oeqa.' + type + '.' + f[:-3]
+ if module not in testslist:
+ testslist.append(module)
+
+ for p in bbpath:
+ testpath = os.path.join(p, 'lib', 'oeqa', type)
+ bb.debug(2, 'Searching for tests in %s' % testpath)
+ if os.path.exists(testpath):
+ add_auto_list(testpath)
+
+ return testslist
+
+ def getTestModules(self):
+ """
+ Returns all the test modules in the testlist.
+ """
+
+ import pkgutil
+
+ modules = []
+ for test in self.testslist:
+ if re.search(r"\w+\.\w+\.test_\S+", test):
+ test = '.'.join(test.split('.')[:3])
+ module = pkgutil.get_loader(test)
+ modules.append(module)
+
+ return modules
+
+ def getModulefromID(self, test_id):
+ """
+ Returns the test module based on a test id.
+ """
+
+ module_name = ".".join(test_id.split(".")[:3])
+ modules = self.getTestModules()
+ for module in modules:
+ if module.name == module_name:
+ return module
+
+ return None
+
+ def getTests(self, test):
+ '''Return all individual tests executed when running the suite.'''
+ # Unfortunately unittest does not have an API for this, so we have
+ # to rely on implementation details. This only needs to work
+ # for TestSuite containing TestCase.
+ method = getattr(test, '_testMethodName', None)
+ if method:
+ # leaf case: a TestCase
+ yield test
+ else:
+ # Look into TestSuite.
+ tests = getattr(test, '_tests', [])
+ for t1 in tests:
+ for t2 in self.getTests(t1):
+ yield t2
+
+ def loadTests(self):
+ setattr(oeTest, "tc", self)
+
+ testloader = unittest.TestLoader()
+ testloader.sortTestMethodsUsing = None
+ suites = [testloader.loadTestsFromName(name) for name in self.testslist]
+ suites = filterByTagExp(suites, getattr(self, "tagexp", None))
+
+ # Determine dependencies between suites by looking for @skipUnlessPassed
+ # method annotations. Suite A depends on suite B if any method in A
+ # depends on a method on B.
+ for suite in suites:
+ suite.dependencies = []
+ suite.depth = 0
+ for test in self.getTests(suite):
+ methodname = getattr(test, '_testMethodName', None)
+ if methodname:
+ method = getattr(test, methodname)
+ depends_on = getattr(method, '_depends_on', None)
+ if depends_on:
+ for dep_suite in suites:
+ if depends_on in [getattr(t, '_testMethodName', None) for t in self.getTests(dep_suite)]:
+ if dep_suite not in suite.dependencies and \
+ dep_suite is not suite:
+ suite.dependencies.append(dep_suite)
+ break
+ else:
+ logger.warning("Test %s was declared as @skipUnlessPassed('%s') but that test is either not defined or not active. Will run the test anyway." %
+ (test, depends_on))
+
+ # Use brute-force topological sort to determine ordering. Sort by
+ # depth (higher depth = must run later), with original ordering to
+ # break ties.
+ def set_suite_depth(suite):
+ for dep in suite.dependencies:
+ new_depth = set_suite_depth(dep) + 1
+ if new_depth > suite.depth:
+ suite.depth = new_depth
+ return suite.depth
+
+ for index, suite in enumerate(suites):
+ set_suite_depth(suite)
+ suite.index = index
+
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+ def cmpfunc(a, b):
+ return cmp((a.depth, a.index), (b.depth, b.index))
+
+ suites.sort(key=functools.cmp_to_key(cmpfunc))
+
+ self.suite = testloader.suiteClass(suites)
+
+ return self.suite
+
+ def runTests(self):
+ logger.info("Test modules %s" % self.testslist)
+ if hasattr(self, "tagexp") and self.tagexp:
+ logger.info("Filter test cases by tags: %s" % self.tagexp)
+ logger.info("Found %s tests" % self.suite.countTestCases())
+ runner = unittest.TextTestRunner(verbosity=2)
+ if 'bb' in sys.modules:
+ runner.stream.write = custom_verbose
+
+ return runner.run(self.suite)
+
+class RuntimeTestContext(TestContext):
+ def __init__(self, d, target, exported=False):
+ super(RuntimeTestContext, self).__init__(d, exported)
+
+ self.target = target
+
+ self.pkgmanifest = {}
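+ # Parse the image manifest, one "<pkg> <arch> <version>" entry per
+ # line, into a dict keyed by package name.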
+ manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"),
+ d.getVar("IMAGE_LINK_NAME") + ".manifest")
+ nomanifest = d.getVar("IMAGE_NO_MANIFEST")
+ if nomanifest is None or nomanifest != "1":
+ try:
+ with open(manifest) as f:
+ for line in f:
+ (pkg, arch, version) = line.strip().split()
+ self.pkgmanifest[pkg] = (version, arch)
+ except IOError as e:
+ bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
+
+ def _get_test_namespace(self):
+ return "runtime"
+
+ def _get_test_suites(self):
+ testsuites = []
+
+ manifests = (self.d.getVar("TEST_SUITES_MANIFEST") or '').split()
+ if manifests:
+ for manifest in manifests:
+ testsuites.extend(self._read_testlist(manifest,
+ self.d.getVar("TOPDIR")).split())
+
+ else:
+ testsuites = self.d.getVar("TEST_SUITES").split()
+
+ return testsuites
+
+ def _get_test_suites_required(self):
+ return [t for t in self.d.getVar("TEST_SUITES").split() if t != "auto"]
+
+ def loadTests(self):
+ super(RuntimeTestContext, self).loadTests()
+ if oeTest.hasPackage("procps"):
+ oeRuntimeTest.pscmd = "ps -ef"
+
+ def extract_packages(self):
+ """
+ Find packages that will be needed during runtime.
+ """
+
+ modules = self.getTestModules()
+
+ shutil.rmtree(self.d.getVar("TEST_EXTRACTED_DIR"))
+ shutil.rmtree(self.d.getVar("TEST_PACKAGED_DIR"))
+ for module in modules:
+ json_file = self._getJsonFile(module)
+ if json_file:
+ needed_packages = self._getNeededPackages(json_file)
+ self._perform_package_extraction(needed_packages)
+
+ def _perform_package_extraction(self, needed_packages):
+ """
+ Extract packages that will be needed during runtime.
+ """
+
+ import oe.path
+
+ extracted_path = self.d.getVar("TEST_EXTRACTED_DIR")
+ packaged_path = self.d.getVar("TEST_PACKAGED_DIR")
+
+ for key, value in needed_packages.items():
+ packages = ()
+ if isinstance(value, dict):
+ packages = (value, )
+ elif isinstance(value, list):
+ packages = value
+ else:
+ bb.fatal("Failed to process needed packages for %s; "
+ "Value must be a dict or list" % key)
+
+ for package in packages:
+ pkg = package["pkg"]
+ rm = package.get("rm", False)
+ extract = package.get("extract", True)
+ if extract:
+ dst_dir = os.path.join(extracted_path, pkg)
+ else:
+ dst_dir = packaged_path
+
+ # Extract package and copy it to TEST_EXTRACTED_DIR
+ pkg_dir = self._extract_in_tmpdir(pkg)
+ if extract:
+
+ # Same package used for more than one test,
+ # don't need to extract again.
+ if os.path.exists(dst_dir):
+ continue
+ oe.path.copytree(pkg_dir, dst_dir)
+ shutil.rmtree(pkg_dir)
+
+ # Copy package to TEST_PACKAGED_DIR
+ else:
+ self._copy_package(pkg)
+
+ def _getJsonFile(self, module):
+ """
+ Returns the path of the JSON file for a module, or an empty string if it doesn't exist.
+ """
+
+ module_file = module.path
+ json_file = "%s.json" % module_file.rsplit(".", 1)[0]
+ if os.path.isfile(module_file) and os.path.isfile(json_file):
+ return json_file
+ else:
+ return ""
+
+ def _getNeededPackages(self, json_file, test=None):
+ """
+ Returns a dict with needed packages based on a JSON file.
+
+ If a test is specified it will return the dict just for that test.
+ """
+
+ import json
+
+ needed_packages = {}
+
+ with open(json_file) as f:
+ test_packages = json.load(f)
+ for key, value in test_packages.items():
+ needed_packages[key] = value
+
+ if test:
+ if test in needed_packages:
+ needed_packages = needed_packages[test]
+ else:
+ needed_packages = {}
+
+ return needed_packages
+
+ def _extract_in_tmpdir(self, pkg):
+ """"
+ Returns path to a temp directory where the package was
+ extracted without dependencies.
+ """
+
+ from oeqa.utils.package_manager import get_package_manager
+
+ pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
+ pm = get_package_manager(self.d, pkg_path)
+ extract_dir = pm.extract(pkg)
+ shutil.rmtree(pkg_path)
+
+ return extract_dir
+
+ def _copy_package(self, pkg):
+ """
+ Copy the RPM, DEB or IPK package to dst_dir
+ """
+
+ from oeqa.utils.package_manager import get_package_manager
+
+ pkg_path = os.path.join(self.d.getVar("TEST_INSTALL_TMP_DIR"), pkg)
+ dst_dir = self.d.getVar("TEST_PACKAGED_DIR")
+ pm = get_package_manager(self.d, pkg_path)
+ pkg_info = pm.package_info(pkg)
+ file_path = pkg_info[pkg]["filepath"]
+ shutil.copy2(file_path, dst_dir)
+ shutil.rmtree(pkg_path)
+
+ def install_uninstall_packages(self, test_id, pkg_dir, install):
+ """
+ Check whether the test requires packages and install/uninstall them on the DUT.
+ """
+
+ test = test_id.split(".")[4]
+ module = self.getModulefromID(test_id)
+ json = self._getJsonFile(module)
+ if json:
+ needed_packages = self._getNeededPackages(json, test)
+ if needed_packages:
+ self._install_uninstall_packages(needed_packages, pkg_dir, install)
+
+ def _install_uninstall_packages(self, needed_packages, pkg_dir, install=True):
+ """
+ Install/Uninstall packages in the DUT without using a package manager
+ """
+
+ if isinstance(needed_packages, dict):
+ packages = [needed_packages]
+ elif isinstance(needed_packages, list):
+ packages = needed_packages
+
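+ # Each entry may set "pkg" (name), "extract" (copy the extracted
+ # tree on install) and "rm" (remove it from the DUT on uninstall).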
+ for package in packages:
+ pkg = package["pkg"]
+ rm = package.get("rm", False)
+ extract = package.get("extract", True)
+ src_dir = os.path.join(pkg_dir, pkg)
+
+ # Install package
+ if install and extract:
+ self.target.connection.copy_dir_to(src_dir, "/")
+
+ # Uninstall package
+ elif not install and rm:
+ self.target.connection.delete_dir_structure(src_dir, "/")
+
+class ImageTestContext(RuntimeTestContext):
+ def __init__(self, d, target, host_dumper):
+ super(ImageTestContext, self).__init__(d, target)
+
+ self.tagexp = d.getVar("TEST_SUITES_TAGS")
+
+ self.host_dumper = host_dumper
+
+ self.sigterm = False
+ self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
+ signal.signal(signal.SIGTERM, self._sigterm_exception)
+
+ def _sigterm_exception(self, signum, stackframe):
+ bb.warn("TestImage received SIGTERM, shutting down...")
+ self.sigterm = True
+ self.target.stop()
+
+ def install_uninstall_packages(self, test_id, install=True):
+ """
+ Check whether the test requires packages and install/uninstall them on the DUT.
+ """
+
+ pkg_dir = self.d.getVar("TEST_EXTRACTED_DIR")
+ super(ImageTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
+
+class ExportTestContext(RuntimeTestContext):
+ def __init__(self, d, target, exported=False, parsedArgs={}):
+ """
+ This class is used when exporting tests and when they are executed outside the OE environment.
+
+ parsedArgs can contain the following:
+ - tag: Filter test by tag.
+ """
+ super(ExportTestContext, self).__init__(d, target, exported)
+
+ tag = parsedArgs.get("tag", None)
+ self.tagexp = tag if tag is not None else d.getVar("TEST_SUITES_TAGS")
+
+ self.sigterm = None
+
+ def install_uninstall_packages(self, test_id, install=True):
+ """
+ Check whether the test requires packages and install/uninstall them on the DUT.
+ """
+
+ export_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ extracted_dir = self.d.getVar("TEST_EXPORT_EXTRACTED_DIR")
+ pkg_dir = os.path.join(export_dir, extracted_dir)
+ super(ExportTestContext, self).install_uninstall_packages(test_id, pkg_dir, install)
diff --git a/poky/meta/lib/oeqa/runexported.py b/poky/meta/lib/oeqa/runexported.py
new file mode 100755
index 000000000..9cfea0f7a
--- /dev/null
+++ b/poky/meta/lib/oeqa/runexported.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+
+
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# This script should be used outside of the build system to run image tests.
+# It needs a json file as input as exported by the build.
+# E.g. for an already built image:
+#- export the tests:
+# TEST_EXPORT_ONLY = "1"
+# TEST_TARGET = "simpleremote"
+# TEST_TARGET_IP = "192.168.7.2"
+# TEST_SERVER_IP = "192.168.7.1"
+# bitbake core-image-sato -c testimage
+# Set up your target, e.g. for qemu: runqemu core-image-sato
+# cd build/tmp/testimage/core-image-sato
+# ./runexported.py testdata.json
+
+import sys
+import os
+import time
+import argparse
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "oeqa")))
+
+from oeqa.oetest import ExportTestContext
+from oeqa.utils.commands import runCmd, updateEnv
+from oeqa.utils.sshcontrol import SSHControl
+
+# This isn't pretty, but we need a fake target object for running the
+# tests externally: we don't care about deploy/start, only about the
+# connection methods (run, copy).
+class FakeTarget(object):
+ def __init__(self, d):
+ self.connection = None
+ self.ip = None
+ self.server_ip = None
+ self.datetime = time.strftime('%Y%m%d%H%M%S',time.gmtime())
+ self.testdir = d.getVar("TEST_LOG_DIR")
+ self.pn = d.getVar("PN")
+
+ def exportStart(self):
+ self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
+ sshloglink = os.path.join(self.testdir, "ssh_target_log")
+ if os.path.lexists(sshloglink):
+ os.remove(sshloglink)
+ os.symlink(self.sshlog, sshloglink)
+ print("SSH log file: %s" % self.sshlog)
+ self.connection = SSHControl(self.ip, logfile=self.sshlog)
+
+ def run(self, cmd, timeout=None):
+ return self.connection.run(cmd, timeout)
+
+ def copy_to(self, localpath, remotepath):
+ return self.connection.copy_to(localpath, remotepath)
+
+ def copy_from(self, remotepath, localpath):
+ return self.connection.copy_from(remotepath, localpath)
+
+
+class MyDataDict(dict):
+ def getVar(self, key, unused = None):
+ return self.get(key, "")
+
+def main():
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-t", "--target-ip", dest="ip", help="The IP address of the target machine. Use this to \
+ overwrite the value determined from TEST_TARGET_IP at build time")
+ parser.add_argument("-s", "--server-ip", dest="server_ip", help="The IP address of this machine. Use this to \
+ overwrite the value determined from TEST_SERVER_IP at build time.")
+ parser.add_argument("-d", "--deploy-dir", dest="deploy_dir", help="Full path to the package feeds, that this \
+ the contents of what used to be DEPLOY_DIR on the build machine. If not specified it will use the value \
+ specified in the json if that directory actually exists or it will error out.")
+ parser.add_argument("-l", "--log-dir", dest="log_dir", help="This sets the path for TEST_LOG_DIR. If not specified \
+ the current dir is used. This is used for usually creating a ssh log file and a scp test file.")
+ parser.add_argument("-a", "--tag", dest="tag", help="Only run test with specified tag.")
+ parser.add_argument("json", help="The json file exported by the build system", default="testdata.json", nargs='?')
+
+ args = parser.parse_args()
+
+ with open(args.json, "r") as f:
+ loaded = json.load(f)
+
+ if args.ip:
+ loaded["target"]["ip"] = args.ip
+ if args.server_ip:
+ loaded["target"]["server_ip"] = args.server_ip
+
+ d = MyDataDict()
+ for key in loaded["d"].keys():
+ d[key] = loaded["d"][key]
+
+ if args.log_dir:
+ d["TEST_LOG_DIR"] = args.log_dir
+ else:
+ d["TEST_LOG_DIR"] = os.path.abspath(os.path.dirname(__file__))
+ if args.deploy_dir:
+ d["DEPLOY_DIR"] = args.deploy_dir
+ else:
+ if not os.path.isdir(d["DEPLOY_DIR"]):
+ print("WARNING: The path to DEPLOY_DIR does not exist: %s" % d["DEPLOY_DIR"])
+
+ parsedArgs = {}
+ parsedArgs["tag"] = args.tag
+
+ extract_sdk(d)
+
+ target = FakeTarget(d)
+ for key in loaded["target"].keys():
+ setattr(target, key, loaded["target"][key])
+
+ target.exportStart()
+ tc = ExportTestContext(d, target, True, parsedArgs)
+ tc.loadTests()
+ tc.runTests()
+
+ return 0
+
+def extract_sdk(d):
+ """
+ Extract SDK if needed
+ """
+
+ export_dir = os.path.dirname(os.path.realpath(__file__))
+ tools_dir = d.getVar("TEST_EXPORT_SDK_DIR")
+ tarball_name = "%s.sh" % d.getVar("TEST_EXPORT_SDK_NAME")
+ tarball_path = os.path.join(export_dir, tools_dir, tarball_name)
+ extract_path = os.path.join(export_dir, "sysroot")
+ if os.path.isfile(tarball_path):
+ print ("Found SDK tarball %s. Extracting..." % tarball_path)
+ result = runCmd("%s -y -d %s" % (tarball_path, extract_path))
+ for f in os.listdir(extract_path):
+ if f.startswith("environment-setup"):
+ print("Setting up SDK environment...")
+ env_file = os.path.join(extract_path, f)
+ updateEnv(env_file)
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/poky/meta/lib/oeqa/runtime/case.py b/poky/meta/lib/oeqa/runtime/case.py
new file mode 100644
index 000000000..2f190acf1
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/case.py
@@ -0,0 +1,17 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+from oeqa.utils.package_manager import install_package, uninstall_package
+
+class OERuntimeTestCase(OETestCase):
+ # target instance set by OERuntimeTestLoader.
+ target = None
+
+ def setUp(self):
+ super(OERuntimeTestCase, self).setUp()
+ install_package(self)
+
+ def tearDown(self):
+ super(OERuntimeTestCase, self).tearDown()
+ uninstall_package(self)
diff --git a/poky/meta/lib/oeqa/runtime/cases/_qemutiny.py b/poky/meta/lib/oeqa/runtime/cases/_qemutiny.py
new file mode 100644
index 000000000..7b5b48141
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/_qemutiny.py
@@ -0,0 +1,8 @@
+from oeqa.runtime.case import OERuntimeTestCase
+
+class QemuTinyTest(OERuntimeTestCase):
+
+ def test_boot_tiny(self):
+ status, output = self.target.run_serial('uname -a')
+ msg = "Cannot detect poky tiny boot!"
+ self.assertTrue("yocto-tiny" in output, msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/apt.py b/poky/meta/lib/oeqa/runtime/cases/apt.py
new file mode 100644
index 000000000..8d4dd35c5
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/apt.py
@@ -0,0 +1,49 @@
+import os
+from oeqa.utils.httpserver import HTTPService
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class AptTest(OERuntimeTestCase):
+
+ def pkg(self, command, expected=0):
+ command = 'apt-get %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class AptRepoTest(AptTest):
+
+ @classmethod
+ def setUpClass(cls):
+ service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_DEB'], 'all')
+ cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.repo_server.stop()
+
+ def setup_source_config_for_package_install(self):
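+ # Point apt at the deb feed served over HTTP from the build host;
+ # sources.list is removed again by the cleanup method below.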
+ apt_get_source_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
+ apt_get_sourceslist_dir = '/etc/apt/'
+ self.target.run('cd %s; echo deb %s ./ > sources.list' % (apt_get_sourceslist_dir, apt_get_source_server))
+
+ def cleanup_source_config_for_package_install(self):
+ apt_get_sourceslist_dir = '/etc/apt/'
+ self.target.run('cd %s; rm sources.list' % (apt_get_sourceslist_dir))
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'deb',
+ 'DEB is not the primary package manager')
+ @OEHasPackage(['apt'])
+ def test_apt_install_from_repo(self):
+ self.setup_source_config_for_package_install()
+ self.pkg('update')
+ self.pkg('remove --yes run-postinsts-dev')
+ self.pkg('install --yes --allow-unauthenticated run-postinsts-dev')
+ self.cleanup_source_config_for_package_install()
diff --git a/poky/meta/lib/oeqa/runtime/cases/buildcpio.py b/poky/meta/lib/oeqa/runtime/cases/buildcpio.py
new file mode 100644
index 000000000..79b22d04d
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/buildcpio.py
@@ -0,0 +1,29 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
+
+class BuildCpioTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ uri = 'https://downloads.yoctoproject.org/mirror/sources/cpio-2.12.tar.gz'
+ cls.project = TargetBuildProject(cls.tc.target,
+ uri,
+ dl_dir = cls.tc.td['DL_DIR'])
+ cls.project.download_archive()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.project.clean()
+
+ @OETestID(205)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_cpio(self):
+ self.project.run_configure()
+ self.project.run_make()
+ self.project.run_install()
diff --git a/poky/meta/lib/oeqa/runtime/cases/buildgalculator.py b/poky/meta/lib/oeqa/runtime/cases/buildgalculator.py
new file mode 100644
index 000000000..7c9d4a392
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/buildgalculator.py
@@ -0,0 +1,28 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
+
+class GalculatorTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ uri = 'http://galculator.mnim.org/downloads/galculator-2.1.4.tar.bz2'
+ cls.project = TargetBuildProject(cls.tc.target,
+ uri,
+ dl_dir = cls.tc.td['DL_DIR'])
+ cls.project.download_archive()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.project.clean()
+
+ @OETestID(1526)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_galculator(self):
+ self.project.run_configure()
+ self.project.run_make()
diff --git a/poky/meta/lib/oeqa/runtime/cases/buildlzip.py b/poky/meta/lib/oeqa/runtime/cases/buildlzip.py
new file mode 100644
index 000000000..ca3fead2e
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/buildlzip.py
@@ -0,0 +1,30 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+from oeqa.runtime.utils.targetbuildproject import TargetBuildProject
+
+class BuildLzipTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ uri = 'http://downloads.yoctoproject.org/mirror/sources'
+ uri = '%s/lzip-1.19.tar.gz' % uri
+ cls.project = TargetBuildProject(cls.tc.target,
+ uri,
+ dl_dir = cls.tc.td['DL_DIR'])
+ cls.project.download_archive()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.project.clean()
+
+ @OETestID(206)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_lzip(self):
+ self.project.run_configure()
+ self.project.run_make()
+ self.project.run_install()
diff --git a/poky/meta/lib/oeqa/runtime/cases/connman.py b/poky/meta/lib/oeqa/runtime/cases/connman.py
new file mode 100644
index 000000000..12456b417
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/connman.py
@@ -0,0 +1,30 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class ConnmanTest(OERuntimeTestCase):
+
+ def service_status(self, service):
+ if 'systemd' in self.tc.td['DISTRO_FEATURES']:
+ (_, output) = self.target.run('systemctl status -l %s' % service)
+ return output
+ else:
+ return "Unable to get status or logs for %s" % service
+
+ @OETestID(961)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(["connman"])
+ def test_connmand_help(self):
+ (status, output) = self.target.run('/usr/sbin/connmand --help')
+ msg = 'Failed to get connman help. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(221)
+ @OETestDepends(['connman.ConnmanTest.test_connmand_help'])
+ def test_connmand_running(self):
+ cmd = '%s | grep [c]onnmand' % self.tc.target_cmds['ps']
+ (status, output) = self.target.run(cmd)
+ if status != 0:
+ self.logger.info(self.service_status("connman"))
+ self.fail("No connmand process running")
diff --git a/poky/meta/lib/oeqa/runtime/cases/date.py b/poky/meta/lib/oeqa/runtime/cases/date.py
new file mode 100644
index 000000000..ece7338de
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/date.py
@@ -0,0 +1,38 @@
+import re
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+
+class DateTest(OERuntimeTestCase):
+
+ def setUp(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Stopping systemd-timesyncd daemon')
+ self.target.run('systemctl stop systemd-timesyncd')
+
+ def tearDown(self):
+ if self.tc.td.get('VIRTUAL-RUNTIME_init_manager') == 'systemd':
+ self.logger.debug('Starting systemd-timesyncd daemon')
+ self.target.run('systemctl start systemd-timesyncd')
+
+ @OETestID(211)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_date(self):
+ (status, output) = self.target.run('date +"%Y-%m-%d %T"')
+ msg = 'Failed to get initial date, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+ oldDate = output
+
+ sampleDate = '"2016-08-09 10:00:00"'
+ (status, output) = self.target.run("date -s %s" % sampleDate)
+ self.assertEqual(status, 0, msg='Date set failed, output: %s' % output)
+
+ (status, output) = self.target.run("date -R")
+ p = re.match(r'Tue, 09 Aug 2016 10:00:.. \+0000', output)
+ msg = 'The date was not set correctly, output: %s' % output
+ self.assertTrue(p, msg=msg)
+
+ (status, output) = self.target.run('date -s "%s"' % oldDate)
+ msg = 'Failed to reset date, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/df.py b/poky/meta/lib/oeqa/runtime/cases/df.py
new file mode 100644
index 000000000..aecc32d7c
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/df.py
@@ -0,0 +1,13 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+
+class DfTest(OERuntimeTestCase):
+
+ @OETestID(234)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_df(self):
+ cmd = "df / | sed -n '2p' | awk '{print $4}'"
+ (status, output) = self.target.run(cmd)
+ msg = 'Not enough free space on image. Available: %s kB' % output
+ self.assertTrue(int(output) > 5120, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/dnf.py b/poky/meta/lib/oeqa/runtime/cases/dnf.py
new file mode 100644
index 000000000..2f87296b4
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/dnf.py
@@ -0,0 +1,125 @@
+import os
+import re
+import subprocess
+from oeqa.utils.httpserver import HTTPService
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class DnfTest(OERuntimeTestCase):
+
+ def dnf(self, command, expected=0):
+ command = 'dnf %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class DnfBasicTest(DnfTest):
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'rpm',
+ 'RPM is not the primary package manager')
+ @OEHasPackage(['dnf'])
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OETestID(1735)
+ def test_dnf_help(self):
+ self.dnf('--help')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ @OETestID(1739)
+ def test_dnf_version(self):
+ self.dnf('--version')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ @OETestID(1737)
+ def test_dnf_info(self):
+ self.dnf('info dnf')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ @OETestID(1738)
+ def test_dnf_search(self):
+ self.dnf('search dnf')
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ @OETestID(1736)
+ def test_dnf_history(self):
+ self.dnf('history')
+
+class DnfRepoTest(DnfTest):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.repo_server = HTTPService(os.path.join(cls.tc.td['WORKDIR'], 'oe-testimage-repo'),
+ cls.tc.target.server_ip)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.repo_server.stop()
+
+ def dnf_with_repo(self, command):
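+ # Build one --repofrompath option per package architecture so dnf
+ # can resolve from the HTTP-served oe-testimage-repo feeds.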
+ pkgarchs = os.listdir(os.path.join(self.tc.td['WORKDIR'], 'oe-testimage-repo'))
+ deploy_url = 'http://%s:%s/' %(self.target.server_ip, self.repo_server.port)
+ cmdlinerepoopts = ["--repofrompath=oe-testimage-repo-%s,%s%s" %(arch, deploy_url, arch) for arch in pkgarchs]
+
+ self.dnf(" ".join(cmdlinerepoopts) + " --nogpgcheck " + command)
+
+ @OETestDepends(['dnf.DnfBasicTest.test_dnf_help'])
+ @OETestID(1744)
+ def test_dnf_makecache(self):
+ self.dnf_with_repo('makecache')
+
+
+# Does not work when repo is specified on the command line
+# @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+# def test_dnf_repolist(self):
+# self.dnf_with_repo('repolist')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ @OETestID(1746)
+ def test_dnf_repoinfo(self):
+ self.dnf_with_repo('repoinfo')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_makecache'])
+ @OETestID(1740)
+ def test_dnf_install(self):
+ self.dnf_with_repo('install -y run-postinsts-dev')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install'])
+ @OETestID(1741)
+ def test_dnf_install_dependency(self):
+ self.dnf_with_repo('remove -y run-postinsts')
+ self.dnf_with_repo('install -y run-postinsts-dev')
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install_dependency'])
+ @OETestID(1742)
+ def test_dnf_install_from_disk(self):
+ self.dnf_with_repo('remove -y run-postinsts-dev')
+ self.dnf_with_repo('install -y --downloadonly run-postinsts-dev')
+ status, output = self.target.run('find /var/cache/dnf -name run-postinsts-dev*rpm', 1500)
+ self.assertEqual(status, 0, output)
+ self.dnf_with_repo('install -y %s' % output)
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install_from_disk'])
+ @OETestID(1743)
+ def test_dnf_install_from_http(self):
+ output = subprocess.check_output('find %s -name run-postinsts-dev*' %
+ os.path.join(self.tc.td['WORKDIR'], 'oe-testimage-repo'), shell=True).decode("utf-8")
+ rpm_path = output.split("/")[-2] + "/" + output.split("/")[-1]
+ url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, rpm_path)
+ self.dnf_with_repo('remove -y run-postinsts-dev')
+ self.dnf_with_repo('install -y %s' % url)
+
+ @OETestDepends(['dnf.DnfRepoTest.test_dnf_install'])
+ @OETestID(1745)
+ def test_dnf_reinstall(self):
+ self.dnf_with_repo('reinstall -y run-postinsts-dev')
+
+
diff --git a/poky/meta/lib/oeqa/runtime/cases/gcc.py b/poky/meta/lib/oeqa/runtime/cases/gcc.py
new file mode 100644
index 000000000..911083156
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/gcc.py
@@ -0,0 +1,73 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class GccCompileTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ dst = '/tmp/'
+ src = os.path.join(cls.tc.files_dir, 'test.c')
+ cls.tc.target.copyTo(src, dst)
+
+ src = os.path.join(cls.tc.runtime_files_dir, 'testmakefile')
+ cls.tc.target.copyTo(src, dst)
+
+ src = os.path.join(cls.tc.files_dir, 'test.cpp')
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ files = '/tmp/test.c /tmp/test.o /tmp/test /tmp/testmakefile'
+ cls.tc.target.run('rm %s' % files)
+
+ @OETestID(203)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_gcc_compile(self):
+ status, output = self.target.run('gcc /tmp/test.c -o /tmp/test -lm')
+ msg = 'gcc compile failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('/tmp/test')
+ msg = 'running compiled file failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(200)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_gpp_compile(self):
+ status, output = self.target.run('g++ /tmp/test.c -o /tmp/test -lm')
+ msg = 'g++ compile failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('/tmp/test')
+ msg = 'running compiled file failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(1142)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_gpp2_compile(self):
+ status, output = self.target.run('g++ /tmp/test.cpp -o /tmp/test -lm')
+ msg = 'g++ compile failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('/tmp/test')
+ msg = 'running compiled file failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(204)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_make(self):
+ status, output = self.target.run('cd /tmp; make -f testmakefile')
+ msg = 'running make failed, output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/gi.py b/poky/meta/lib/oeqa/runtime/cases/gi.py
new file mode 100644
index 000000000..19073e52c
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/gi.py
@@ -0,0 +1,15 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class GObjectIntrospectionTest(OERuntimeTestCase):
+
+ @OETestDepends(["ssh.SSHTest.test_ssh"])
+ @OEHasPackage(["python3-pygobject"])
+ def test_python(self):
+ script = """from gi.repository import GObject; print(GObject.markup_escape_text("<testing&testing>"))"""
+ status, output = self.target.run("python3 -c '%s'" % script)
+ self.assertEqual(status, 0, msg="Python failed (%s)" % (output))
+ self.assertEqual(output, "&lt;testing&amp;testing&gt;", msg="Unexpected output (%s)" % output)
diff --git a/poky/meta/lib/oeqa/runtime/cases/kernelmodule.py b/poky/meta/lib/oeqa/runtime/cases/kernelmodule.py
new file mode 100644
index 000000000..de1a5aa44
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/kernelmodule.py
@@ -0,0 +1,40 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class KernelModuleTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ src = os.path.join(cls.tc.runtime_files_dir, 'hellomod.c')
+ dst = '/tmp/hellomod.c'
+ cls.tc.target.copyTo(src, dst)
+
+ src = os.path.join(cls.tc.runtime_files_dir, 'hellomod_makefile')
+ dst = '/tmp/Makefile'
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ files = '/tmp/Makefile /tmp/hellomod.c'
+ cls.tc.target.run('rm %s' % files)
+
+ @OETestID(1541)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['gcc.GccCompileTest.test_gcc_compile'])
+ def test_kernel_module(self):
+ cmds = [
+ 'cd /usr/src/kernel && make scripts prepare',
+ 'cd /tmp && make',
+ 'cd /tmp && insmod hellomod.ko',
+ 'lsmod | grep hellomod',
+ 'dmesg | grep Hello',
+ 'rmmod hellomod',
+ 'dmesg | grep "Cleaning up hellomod"'
+ ]
+ for cmd in cmds:
+ status, output = self.target.run(cmd, 900)
+ self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
diff --git a/poky/meta/lib/oeqa/runtime/cases/ldd.py b/poky/meta/lib/oeqa/runtime/cases/ldd.py
new file mode 100644
index 000000000..c6d92fd5a
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/ldd.py
@@ -0,0 +1,25 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class LddTest(OERuntimeTestCase):
+
+ @OETestID(962)
+ @skipIfNotFeature('tools-sdk',
+ 'Test requires tools-sdk to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_ldd_exists(self):
+ status, output = self.target.run('which ldd')
+ msg = 'ldd does not exist in PATH: which ldd: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(239)
+ @OETestDepends(['ldd.LddTest.test_ldd_exists'])
+ def test_ldd_rtldlist_check(self):
+ cmd = ('for i in $(which ldd | xargs cat | grep "^RTLDLIST"| '
+ 'cut -d\'=\' -f2|tr -d \'"\'); '
+ 'do test -f $i && echo $i && break; done')
+ status, output = self.target.run(cmd)
+ msg = "ldd path not correct or RTLDLIST files don't exist."
+ self.assertEqual(status, 0, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/logrotate.py b/poky/meta/lib/oeqa/runtime/cases/logrotate.py
new file mode 100644
index 000000000..992fef298
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/logrotate.py
@@ -0,0 +1,42 @@
+# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=289 testcase
+# Note that the image under test must have logrotate installed
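+#
+# The setup test injects an 'olddir' directive into the wtmp stanza of
+# /etc/logrotate.conf so that rotated logs are kept in $HOME/logrotate_dir:
+#
+#   wtmp {
+#       olddir $HOME/logrotate_dir
+#       ...
+#   }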
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class LogrotateTest(OERuntimeTestCase):
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.tc.target.run('rm -rf $HOME/logrotate_dir')
+
+ @OETestID(1544)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['logrotate'])
+ def test_1_logrotate_setup(self):
+ status, output = self.target.run('mkdir $HOME/logrotate_dir')
+ msg = 'Could not create logrotate_dir. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ cmd = ('sed -i "s#wtmp {#wtmp {\\n olddir $HOME/logrotate_dir#"'
+ ' /etc/logrotate.conf')
+ status, output = self.target.run(cmd)
+ msg = ('Could not write to logrotate.conf file. Status and output: '
+ '%s and %s' % (status, output))
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(1542)
+ @OETestDepends(['logrotate.LogrotateTest.test_1_logrotate_setup'])
+ def test_2_logrotate(self):
+ status, output = self.target.run('logrotate -f /etc/logrotate.conf')
+ msg = ('logrotate service could not be reloaded. Status and output: '
+ '%s and %s' % (status, output))
+ self.assertEqual(status, 0, msg=msg)
+
+ _, output = self.target.run('ls -la $HOME/logrotate_dir/ | wc -l')
+ msg = ('new logfile could not be created. List of files within log '
+ 'directory: %s' % (
+ self.target.run('ls -la $HOME/logrotate_dir')[1]))
+ self.assertTrue(int(output) >= 3, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/multilib.py b/poky/meta/lib/oeqa/runtime/cases/multilib.py
new file mode 100644
index 000000000..8c167f100
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/multilib.py
@@ -0,0 +1,41 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotInDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class MultilibTest(OERuntimeTestCase):
+
+ def archtest(self, binary, arch):
+ """
+ Check that ``binary`` has the ELF class ``arch`` (e.g. ELF32/ELF64).
+ """
+
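+ # 'readelf -h' output contains a header line such as (typical):
+ #   Class:                             ELF64
+ # The comprehension below pulls out that value for the comparison.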
+ status, output = self.target.run('readelf -h %s' % binary)
+ self.assertEqual(status, 0, 'Failed to readelf %s' % binary)
+
+ classes = [line.split()[1] for line in output.split('\n') if "Class:" in line]
+ if classes:
+ theclass = classes[0]
+ else:
+ self.fail('Cannot parse readelf. Output:\n%s' % output)
+
+ msg = "%s isn't %s (is %s)" % (binary, arch, theclass)
+ self.assertEqual(theclass, arch, msg=msg)
+
+ @OETestID(1593)
+ @skipIfNotInDataVar('MULTILIBS', 'multilib:lib32',
+ "This isn't a multilib:lib32 image")
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_check_multilib_libc(self):
+ """
+ Check that a multilib image has both 32-bit and 64-bit libc installed.
+ """
+ self.archtest("/lib/libc.so.6", "ELF32")
+ self.archtest("/lib64/libc.so.6", "ELF64")
+
+ @OETestID(279)
+ @OETestDepends(['multilib.MultilibTest.test_check_multilib_libc'])
+ @OEHasPackage(['lib32-connman'])
+ def test_file_connman(self):
+ self.archtest("/usr/sbin/connmand", "ELF32")
diff --git a/poky/meta/lib/oeqa/runtime/cases/oe_syslog.py b/poky/meta/lib/oeqa/runtime/cases/oe_syslog.py
new file mode 100644
index 000000000..005b6978d
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/oe_syslog.py
@@ -0,0 +1,66 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class SyslogTest(OERuntimeTestCase):
+
+ @OETestID(201)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(["busybox-syslog", "sysklogd"])
+ def test_syslog_running(self):
+ cmd = '%s | grep -i [s]yslogd' % self.tc.target_cmds['ps']
+ status, output = self.target.run(cmd)
+ msg = "No syslogd process; ps output: %s" % output
+ self.assertEqual(status, 0, msg=msg)
+
+class SyslogTestConfig(OERuntimeTestCase):
+
+ @OETestID(1149)
+ @OETestDepends(['oe_syslog.SyslogTest.test_syslog_running'])
+ def test_syslog_logger(self):
+ status, output = self.target.run('logger foobar')
+ msg = "Can't log into syslog. Output: %s " % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('grep foobar /var/log/messages')
+ if status != 0:
+ if self.tc.td.get("VIRTUAL-RUNTIME_init_manager") == "systemd":
+ status, output = self.target.run('journalctl -o cat | grep foobar')
+ else:
+ status, output = self.target.run('logread | grep foobar')
+ msg = ('Test log string not found in /var/log/messages or logread.'
+ ' Output: %s ' % output)
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(1150)
+ @OETestDepends(['oe_syslog.SyslogTest.test_syslog_running'])
+ def test_syslog_restart(self):
+ if "systemd" != self.tc.td.get("VIRTUAL-RUNTIME_init_manager", ""):
+ (_, _) = self.target.run('/etc/init.d/syslog restart')
+ else:
+ (_, _) = self.target.run('systemctl restart syslog.service')
+
+
+ @OETestID(202)
+ @OETestDepends(['oe_syslog.SyslogTestConfig.test_syslog_logger'])
+ @OEHasPackage(["!sysklogd", "busybox"])
+ @skipIfDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
+ 'Not appropriate for systemd image')
+ def test_syslog_startup_config(self):
+ cmd = 'echo "LOGFILE=/var/log/test" >> /etc/syslog-startup.conf'
+ self.target.run(cmd)
+ status, output = self.target.run('/etc/init.d/syslog restart')
+ msg = ('Could not restart syslog service. Status and output:'
+ ' %s and %s' % (status, output))
+ self.assertEqual(status, 0, msg)
+
+ cmd = 'logger foobar && grep foobar /var/log/test'
+ status, output = self.target.run(cmd)
+ msg = 'Test log string not found. Output: %s ' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ cmd = "sed -i 's#LOGFILE=/var/log/test##' /etc/syslog-startup.conf"
+ self.target.run(cmd)
+ self.target.run('/etc/init.d/syslog restart')
diff --git a/poky/meta/lib/oeqa/runtime/cases/opkg.py b/poky/meta/lib/oeqa/runtime/cases/opkg.py
new file mode 100644
index 000000000..671ee06c7
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/opkg.py
@@ -0,0 +1,47 @@
+import os
+from oeqa.utils.httpserver import HTTPService
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.data import skipIfNotDataVar, skipIfNotFeature
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class OpkgTest(OERuntimeTestCase):
+
+ def pkg(self, command, expected=0):
+ command = 'opkg %s' % command
+ status, output = self.target.run(command, 1500)
+ message = os.linesep.join([command, output])
+ self.assertEqual(status, expected, message)
+ return output
+
+class OpkgRepoTest(OpkgTest):
+
+ @classmethod
+ def setUpClass(cls):
+ service_repo = os.path.join(cls.tc.td['DEPLOY_DIR_IPK'], 'all')
+ cls.repo_server = HTTPService(service_repo, cls.tc.target.server_ip)
+ cls.repo_server.start()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.repo_server.stop()
+
+ def setup_source_config_for_package_install(self):
+ pkg_src_server = 'http://%s:%s/' % (self.tc.target.server_ip, self.repo_server.port)
+ opkg_conf_dir = '/etc/opkg/'
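+ # The echo below appends a feed line to /etc/opkg/opkg.conf of the form:
+ #   src/gz all http://192.168.7.1:<port>/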
+ self.target.run('cd %s; echo src/gz all %s >> opkg.conf' % (opkg_conf_dir, pkg_src_server))
+
+ def cleanup_source_config_for_package_install(self):
+ apt_get_sourceslist_dir = '/etc/opkg/'
+ self.target.run('cd %s; sed -i "/^src/d" opkg.conf' % (apt_get_sourceslist_dir))
+
+ @skipIfNotFeature('package-management',
+ 'Test requires package-management to be in IMAGE_FEATURES')
+ @skipIfNotDataVar('IMAGE_PKGTYPE', 'ipk',
+ 'IPK is not the primary package manager')
+ @OEHasPackage(['opkg'])
+ def test_opkg_install_from_repo(self):
+ self.setup_source_config_for_package_install()
+ self.pkg('update')
+ self.pkg('remove run-postinsts-dev')
+ self.pkg('install run-postinsts-dev')
+ self.cleanup_source_config_for_package_install()
diff --git a/poky/meta/lib/oeqa/runtime/cases/pam.py b/poky/meta/lib/oeqa/runtime/cases/pam.py
new file mode 100644
index 000000000..3654cdc94
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/pam.py
@@ -0,0 +1,33 @@
+# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=287 testcase
+# Note that the image under test must have "pam" in DISTRO_FEATURES
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class PamBasicTest(OERuntimeTestCase):
+
+ @OETestID(1543)
+ @skipIfNotFeature('pam', 'Test requires pam to be in DISTRO_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_pam(self):
+ status, output = self.target.run('login --help')
+ msg = ('login command does not work as expected. '
+ 'Status and output: %s and %s' % (status, output))
+ self.assertEqual(status, 1, msg=msg)
+
+ status, output = self.target.run('passwd --help')
+ msg = ('passwd command does not work as expected. '
+ 'Status and output: %s and %s' % (status, output))
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('su --help')
+ msg = ('su command does not work as expected. '
+ 'Status and output: %s and %s' % (status, output))
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('useradd --help')
+ msg = ('useradd command does not work as expected. '
+ 'Status and output: %s and %s' % (status, output))
+ self.assertEqual(status, 0, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/parselogs.py b/poky/meta/lib/oeqa/runtime/cases/parselogs.py
new file mode 100644
index 000000000..1f36c6108
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/parselogs.py
@@ -0,0 +1,363 @@
+import os
+
+from subprocess import check_output, CalledProcessError
+from shutil import rmtree
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+
+# In the future these lists could be moved outside of this module.
+errors = ["error", "cannot", "can\'t", "failed"]
+
+common_errors = [
+ "(WW) warning, (EE) error, (NI) not implemented, (??) unknown.",
+ "dma timeout",
+ "can\'t add hid device:",
+ "usbhid: probe of ",
+ "_OSC failed (AE_ERROR)",
+ "_OSC failed (AE_SUPPORT)",
+ "AE_ALREADY_EXISTS",
+ "ACPI _OSC request failed (AE_SUPPORT)",
+ "can\'t disable ASPM",
+ "Failed to load module \"vesa\"",
+ "Failed to load module vesa",
+ "Failed to load module \"modesetting\"",
+ "Failed to load module modesetting",
+ "Failed to load module \"glx\"",
+ "Failed to load module \"fbdev\"",
+ "Failed to load module fbdev",
+ "Failed to load module glx",
+ "[drm] Cannot find any crtc or sizes - going 1024x768",
+ "_OSC failed (AE_NOT_FOUND); disabling ASPM",
+ "Open ACPI failed (/var/run/acpid.socket) (No such file or directory)",
+ "NX (Execute Disable) protection cannot be enabled: non-PAE kernel!",
+ "hd.: possibly failed opcode",
+ 'NETLINK INITIALIZATION FAILED',
+ 'kernel: Cannot find map file',
+ 'omap_hwmod: debugss: _wait_target_disable failed',
+ 'VGA arbiter: cannot open kernel arbiter, no multi-card support',
+ 'Failed to find URL:http://ipv4.connman.net/online/status.html',
+ 'Online check failed for',
+ 'netlink init failed',
+ 'Fast TSC calibration',
+ "BAR 0-9",
+ "Failed to load module \"ati\"",
+ "controller can't do DEVSLP, turning off",
+ "stmmac_dvr_probe: warning: cannot get CSR clock",
+ "error: couldn\'t mount because of unsupported optional features",
+ "GPT: Use GNU Parted to correct GPT errors",
+ "Cannot set xattr user.Librepo.DownloadInProgress",
+ ]
+
+video_related = [
+ "uvesafb",
+]
+
+x86_common = [
+ '[drm:psb_do_init] *ERROR* Debug is',
+ 'wrong ELF class',
+ 'Could not enable PowerButton event',
+ 'probe of LNXPWRBN:00 failed with error -22',
+ 'pmd_set_huge: Cannot satisfy',
+ 'failed to setup card detect gpio',
+ 'amd_nb: Cannot enumerate AMD northbridges',
+ 'failed to retrieve link info, disabling eDP',
+ 'Direct firmware load for iwlwifi',
+] + common_errors
+
+qemux86_common = [
+ 'wrong ELF class',
+ "fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
+ "can't claim BAR ",
+ 'amd_nb: Cannot enumerate AMD northbridges',
+ 'uvesafb: 5000 ms task timeout, infinitely waiting',
+ 'tsc: HPET/PMTIMER calibration failed',
+] + common_errors
+
+ignore_errors = {
+ 'default' : common_errors,
+ 'qemux86' : [
+ 'Failed to access perfctr msr (MSR',
+ 'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
+ ] + qemux86_common,
+ 'qemux86-64' : qemux86_common,
+ 'qemumips' : [
+ 'Failed to load module "glx"',
+ 'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
+ 'cacheinfo: Failed to find cpu0 device node',
+ ] + common_errors,
+ 'qemumips64' : [
+ 'pci 0000:00:00.0: [Firmware Bug]: reg 0x..: invalid BAR (can\'t size)',
+ 'cacheinfo: Failed to find cpu0 device node',
+ ] + common_errors,
+ 'qemuppc' : [
+ 'PCI 0000:00 Cannot reserve Legacy IO [io 0x0000-0x0fff]',
+ 'host side 80-wire cable detection failed, limiting max speed',
+ 'mode "640x480" test failed',
+ 'Failed to load module "glx"',
+ 'can\'t handle BAR above 4GB',
+ 'Cannot reserve Legacy IO',
+ ] + common_errors,
+ 'qemuarm' : [
+ 'mmci-pl18x: probe of fpga:05 failed with error -22',
+ 'mmci-pl18x: probe of fpga:0b failed with error -22',
+ 'Failed to load module "glx"',
+ 'OF: amba_device_add() failed (-19) for /amba/smc@10100000',
+ 'OF: amba_device_add() failed (-19) for /amba/mpmc@10110000',
+ 'OF: amba_device_add() failed (-19) for /amba/sctl@101e0000',
+ 'OF: amba_device_add() failed (-19) for /amba/watchdog@101e1000',
+ 'OF: amba_device_add() failed (-19) for /amba/sci@101f0000',
+ 'OF: amba_device_add() failed (-19) for /amba/ssp@101f4000',
+ 'OF: amba_device_add() failed (-19) for /amba/fpga/sci@a000',
+ 'Failed to initialize \'/amba/timer@101e3000\': -22',
+ 'jitterentropy: Initialization failed with host not compliant with requirements: 2',
+ ] + common_errors,
+ 'qemuarm64' : [
+ 'Fatal server error:',
+ '(EE) Server terminated with error (1). Closing log file.',
+ 'dmi: Firmware registration failed.',
+ 'irq: type mismatch, failed to map hwirq-27 for /intc',
+ ] + common_errors,
+ 'emenlow' : [
+ '[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
+ '(EE) Failed to load module "psb"',
+ '(EE) Failed to load module psb',
+ '(EE) Failed to load module "psbdrv"',
+ '(EE) Failed to load module psbdrv',
+ '(EE) open /dev/fb0: No such file or directory',
+ '(EE) AIGLX: reverting to software rendering',
+ ] + x86_common,
+ 'intel-core2-32' : [
+ 'ACPI: No _BQC method, cannot determine initial brightness',
+ '[Firmware Bug]: ACPI: No _BQC method, cannot determine initial brightness',
+ '(EE) Failed to load module "psb"',
+ '(EE) Failed to load module psb',
+ '(EE) Failed to load module "psbdrv"',
+ '(EE) Failed to load module psbdrv',
+ '(EE) open /dev/fb0: No such file or directory',
+ '(EE) AIGLX: reverting to software rendering',
+ 'dmi: Firmware registration failed.',
+ 'ioremap error for 0x78',
+ ] + x86_common,
+ 'intel-corei7-64' : [
+ 'can\'t set Max Payload Size to 256',
+ 'intel_punit_ipc: can\'t request region for resource',
+ '[drm] parse error at position 4 in video mode \'efifb\'',
+ 'ACPI Error: Could not enable RealTimeClock event',
+ 'ACPI Warning: Could not enable fixed event - RealTimeClock',
+ 'hci_intel INT33E1:00: Unable to retrieve gpio',
+ 'hci_intel: probe of INT33E1:00 failed',
+ 'can\'t derive routing for PCI INT A',
+ 'failed to read out thermal zone',
+ 'Bluetooth: hci0: Setting Intel event mask failed',
+ 'ttyS2 - failed to request DMA',
+ 'Bluetooth: hci0: Failed to send firmware data (-38)',
+ 'atkbd serio0: Failed to enable keyboard on isa0060/serio0',
+ ] + x86_common,
+ 'crownbay' : x86_common,
+ 'genericx86' : x86_common,
+ 'genericx86-64' : [
+ 'Direct firmware load for i915',
+ 'Failed to load firmware i915',
+ 'Failed to fetch GuC',
+ 'Failed to initialize GuC',
+ 'Failed to load DMC firmware',
+ 'The driver is built-in, so to load the firmware you need to',
+ ] + x86_common,
+ 'edgerouter' : [
+ 'Fatal server error:',
+ ] + common_errors,
+ 'jasperforest' : [
+ 'Activated service \'org.bluez\' failed:',
+ 'Unable to find NFC netlink family',
+ ] + common_errors,
+}
+
+log_locations = ["/var/log/", "/var/log/dmesg", "/tmp/dmesg_output.log"]
+
+class ParseLogsTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.errors = errors
+
+ # When systemd is enabled we need to notice errors on
+ # circular dependencies in units.
+ if 'systemd' in cls.td.get('DISTRO_FEATURES', ''):
+ cls.errors.extend([
+ 'Found ordering cycle on',
+ 'Breaking ordering cycle by deleting job',
+ 'deleted to break ordering cycle',
+ 'Ordering cycle found, skipping',
+ ])
+
+ cls.ignore_errors = ignore_errors
+ cls.log_locations = log_locations
+ cls.msg = ''
+ is_lsb, _ = cls.tc.target.run("which LSB_Test.sh")
+ if is_lsb == 0:
+ for machine in cls.ignore_errors:
+ cls.ignore_errors[machine] = cls.ignore_errors[machine] \
+ + video_related
+
+ def getMachine(self):
+ return self.td.get('MACHINE', '')
+
+ def getWorkdir(self):
+ return self.td.get('WORKDIR', '')
+
+ # Get some information on the CPU of the machine to display at the
+ # beginning of the output. This info might be useful in some cases.
+ def getHardwareInfo(self):
+ hwi = ""
+ cmd = ('cat /proc/cpuinfo | grep "model name" | head -n1 | '
+ " awk 'BEGIN{FS=\":\"}{print $2}'")
+ _, cpu_name = self.target.run(cmd)
+
+ cmd = ('cat /proc/cpuinfo | grep "cpu cores" | head -n1 | '
+ "awk {'print $4'}")
+ _, cpu_physical_cores = self.target.run(cmd)
+
+ cmd = 'cat /proc/cpuinfo | grep "processor" | wc -l'
+ _, cpu_logical_cores = self.target.run(cmd)
+
+ _, cpu_arch = self.target.run('uname -m')
+
+ hwi += 'Machine information: \n'
+ hwi += '*******************************\n'
+ hwi += 'Machine name: ' + self.getMachine() + '\n'
+ hwi += 'CPU: ' + str(cpu_name) + '\n'
+ hwi += 'Arch: ' + str(cpu_arch)+ '\n'
+ hwi += 'Physical cores: ' + str(cpu_physical_cores) + '\n'
+ hwi += 'Logical cores: ' + str(cpu_logical_cores) + '\n'
+ hwi += '*******************************\n'
+
+ return hwi
+
+ # Go through the log locations provided and if it's a folder
+ # create a list with all the .log files in it, if it's a file
+ # just add it to that list.
+ def getLogList(self, log_locations):
+ logs = []
+ for location in log_locations:
+ status, _ = self.target.run('test -f ' + str(location))
+ if status == 0:
+ logs.append(str(location))
+ else:
+ status, _ = self.target.run('test -d ' + str(location))
+ if status == 0:
+ cmd = 'find ' + str(location) + '/*.log -maxdepth 1 -type f'
+ status, output = self.target.run(cmd)
+ if status == 0:
+ output = output.splitlines()
+ for logfile in output:
+ logs.append(os.path.join(location, str(logfile)))
+ return logs
+
+ # Copy the log files to be parsed locally
+ def transfer_logs(self, log_list):
+ workdir = self.getWorkdir()
+ self.target_logs = workdir + '/' + 'target_logs'
+ target_logs = self.target_logs
+ if os.path.exists(target_logs):
+ rmtree(self.target_logs)
+ os.makedirs(target_logs)
+ for f in log_list:
+ self.target.copyFrom(str(f), target_logs)
+
+ # Get the local list of logs
+ def get_local_log_list(self, log_locations):
+ self.transfer_logs(self.getLogList(log_locations))
+ list_dir = os.listdir(self.target_logs)
+ dir_files = [os.path.join(self.target_logs, f) for f in list_dir]
+ logs = [f for f in dir_files if os.path.isfile(f)]
+ return logs
+
+ # Build the grep command to be used with filters and exclusions
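+ # The generated pipeline has the shape (illustrative):
+ #   grep -Ei "error|cannot|can't|failed" <log> | grep -Eiv '<ignored>|<ignored>|...'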
+ def build_grepcmd(self, errors, ignore_errors, log):
+ grepcmd = 'grep '
+ grepcmd += '-Ei "'
+ for error in errors:
+ grepcmd += error + '|'
+ grepcmd = grepcmd[:-1]
+ grepcmd += '" ' + str(log) + " | grep -Eiv \'"
+
+ try:
+ errorlist = ignore_errors[self.getMachine()]
+ except KeyError:
+ self.msg += 'No ignore list found for this machine, using default\n'
+ errorlist = ignore_errors['default']
+
+ for ignore_error in errorlist:
+ ignore_error = ignore_error.replace('(', r'\(')
+ ignore_error = ignore_error.replace(')', r'\)')
+ ignore_error = ignore_error.replace("'", '.')
+ ignore_error = ignore_error.replace('?', r'\?')
+ ignore_error = ignore_error.replace('[', r'\[')
+ ignore_error = ignore_error.replace(']', r'\]')
+ ignore_error = ignore_error.replace('*', r'\*')
+ ignore_error = ignore_error.replace('0-9', '[0-9]')
+ grepcmd += ignore_error + '|'
+ grepcmd = grepcmd[:-1]
+ grepcmd += "\'"
+
+ return grepcmd
+
+ # Grep only the errors so that their context could be collected.
+ # Default context is 10 lines before and after the error itself
+ def parse_logs(self, errors, ignore_errors, logs,
+ lines_before = 10, lines_after = 10):
+ results = {}
+ rez = []
+ grep_output = ''
+
+ for log in logs:
+ result = None
+ thegrep = self.build_grepcmd(errors, ignore_errors, log)
+
+ try:
+ result = check_output(thegrep, shell=True).decode('utf-8')
+ # grep exits non-zero when nothing matches; that just means no hits.
+ except CalledProcessError:
+ pass
+
+ if result is not None:
+ results[log.replace('target_logs/','')] = {}
+ rez = result.splitlines()
+
+ for xrez in rez:
+ try:
+ cmd = ['grep', '-F', xrez, '-B', str(lines_before)]
+ cmd += ['-A', str(lines_after), log]
+ grep_output = check_output(cmd).decode('utf-8')
+ except CalledProcessError:
+ pass
+ results[log.replace('target_logs/', '')][xrez] = grep_output
+
+ return results
+
+ # Get the output of dmesg and write it in a file.
+ # This file is added to log_locations.
+ def write_dmesg(self):
+ (status, dmesg) = self.target.run('dmesg > /tmp/dmesg_output.log')
+
+ @OETestID(1059)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_parselogs(self):
+ self.write_dmesg()
+ log_list = self.get_local_log_list(self.log_locations)
+ result = self.parse_logs(self.errors, self.ignore_errors, log_list)
+ print(self.getHardwareInfo())
+ errcount = 0
+ for log in result:
+ self.msg += 'Log: ' + log + '\n'
+ self.msg += '-----------------------\n'
+ for error in result[log]:
+ errcount += 1
+ self.msg += 'Central error: ' + str(error) + '\n'
+ self.msg += '***********************\n'
+ self.msg += result[str(log)][str(error)] + '\n'
+ self.msg += '***********************\n'
+ self.msg += '%s errors found in logs.' % errcount
+ self.assertEqual(errcount, 0, msg=self.msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/perl.py b/poky/meta/lib/oeqa/runtime/cases/perl.py
new file mode 100644
index 000000000..d0b7e8ed9
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/perl.py
@@ -0,0 +1,37 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class PerlTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ src = os.path.join(cls.tc.files_dir, 'test.pl')
+ dst = '/tmp/test.pl'
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ dst = '/tmp/test.pl'
+ cls.tc.target.run('rm %s' % dst)
+
+ @OETestID(1141)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['perl'])
+ def test_perl_exists(self):
+ status, output = self.target.run('which perl')
+ msg = 'Perl binary not in PATH or not on target.'
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(208)
+ @OETestDepends(['perl.PerlTest.test_perl_exists'])
+ def test_perl_works(self):
+ status, output = self.target.run('perl /tmp/test.pl')
+ msg = 'Exit status was not 0. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ msg = 'Incorrect output: %s' % output
+ self.assertEqual(output, "the value of a is 0.01", msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/ping.py b/poky/meta/lib/oeqa/runtime/cases/ping.py
new file mode 100644
index 000000000..02f580abe
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/ping.py
@@ -0,0 +1,24 @@
+from subprocess import Popen, PIPE
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.oetimeout import OETimeout
+
+class PingTest(OERuntimeTestCase):
+
+ @OETimeout(30)
+ @OETestID(964)
+ def test_ping(self):
+ output = ''
+ count = 0
+ while count < 5:
+ cmd = 'ping -c 1 %s' % self.target.ip
+ proc = Popen(cmd, shell=True, stdout=PIPE)
+ output += proc.communicate()[0].decode('utf-8')
+ if proc.poll() == 0:
+ count += 1
+ else:
+ count = 0
+ msg = ('Expected 5 consecutive successful pings, got %d.\n'
+ 'ping output is:\n%s' % (count, output))
+ self.assertEqual(count, 5, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/ptest.py b/poky/meta/lib/oeqa/runtime/cases/ptest.py
new file mode 100644
index 000000000..f60a433d5
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/ptest.py
@@ -0,0 +1,93 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+from oeqa.utils.logparser import Lparser, Result
+
+class PtestRunnerTest(OERuntimeTestCase):
+
+ # a ptest log parser
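+ # ptest-runner output is expected to follow this layout, which the
+ # section and test regexes below match:
+ #   BEGIN: /usr/lib/<package>/ptest
+ #   PASS: <test name>
+ #   FAIL: <test name>
+ #   SKIP: <test name>
+ #   END: /usr/lib/<package>/ptest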
+ def parse_ptest(self, logfile):
+ parser = Lparser(test_0_pass_regex="^PASS:(.+)",
+ test_0_fail_regex="^FAIL:(.+)",
+ test_0_skip_regex="^SKIP:(.+)",
+ section_0_begin_regex="^BEGIN: .*/(.+)/ptest",
+ section_0_end_regex="^END: .*/(.+)/ptest")
+ parser.init()
+ result = Result()
+
+ with open(logfile, errors='replace') as f:
+ for line in f:
+ result_tuple = parser.parse_line(line)
+ if not result_tuple:
+ continue
+ # Unpack the tuple parsed above rather than parsing the same line twice.
+ line_type, category, status, name = result_tuple
+
+ if line_type == 'section' and status == 'begin':
+ current_section = name
+ continue
+
+ if line_type == 'section' and status == 'end':
+ current_section = None
+ continue
+
+ if line_type == 'test' and status == 'pass':
+ result.store(current_section, name, status)
+ continue
+
+ if line_type == 'test' and status == 'fail':
+ result.store(current_section, name, status)
+ continue
+
+ if line_type == 'test' and status == 'skip':
+ result.store(current_section, name, status)
+ continue
+
+ result.sort_tests()
+ return result
+
+ @OETestID(1600)
+ @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_ptestrunner(self):
+ status, output = self.target.run('which ptest-runner', 0)
+ if status != 0:
+ self.skipTest("No -ptest packages are installed in the image")
+
+ import datetime
+
+ test_log_dir = self.td.get('TEST_LOG_DIR', '')
+ # TEST_LOG_DIR may be unset when testimage is added after
+ # testdata.json is generated.
+ if not test_log_dir:
+ test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
+ # Don't use self.td.get('DATETIME'): it comes from testdata.json, may be
+ # stale, and can cause "File exists" errors when tests are re-run.
+ timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
+ ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
+ ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')
+
+ status, output = self.target.run('ptest-runner', 0)
+ os.makedirs(ptest_log_dir)
+ with open(ptest_runner_log, 'w') as f:
+ f.write(output)
+
+ # status != 0 is OK since some ptest tests may fail
+ self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")
+
+ # Parse and save results
+ parse_result = self.parse_ptest(ptest_runner_log)
+ parse_result.log_as_files(ptest_log_dir, test_status=['pass', 'fail', 'skip'])
+ if os.path.exists(ptest_log_dir_link):
+ # Remove the old link to create a new one
+ os.remove(ptest_log_dir_link)
+ os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)
+
+ failed_tests = {}
+ for section in parse_result.result_dict:
+ failed_testcases = [test for test, result in parse_result.result_dict[section] if result == 'fail']
+ if failed_testcases:
+ failed_tests[section] = failed_testcases
+
+ self.assertFalse(failed_tests, msg="Failed ptests: %s" % str(failed_tests))
diff --git a/poky/meta/lib/oeqa/runtime/cases/python.py b/poky/meta/lib/oeqa/runtime/cases/python.py
new file mode 100644
index 000000000..bf3e17916
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/python.py
@@ -0,0 +1,43 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class PythonTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ src = os.path.join(cls.tc.files_dir, 'test.py')
+ dst = '/tmp/test.py'
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ dst = '/tmp/test.py'
+ cls.tc.target.run('rm %s' % dst)
+
+ @OETestID(1145)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['python-core'])
+ def test_python_exists(self):
+ status, output = self.target.run('which python')
+ msg = 'Python binary not in PATH or not on target.'
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(965)
+ @OETestDepends(['python.PythonTest.test_python_exists'])
+ def test_python_stdout(self):
+ status, output = self.target.run('python /tmp/test.py')
+ msg = 'Exit status was not 0. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ msg = 'Incorrect output: %s' % output
+ self.assertEqual(output, "the value of a is 0.01", msg=msg)
+
+ @OETestID(1146)
+ @OETestDepends(['python.PythonTest.test_python_stdout'])
+ def test_python_testfile(self):
+ status, output = self.target.run('ls /tmp/testfile.python')
+ self.assertEqual(status, 0, msg='Python test file was not created.')
diff --git a/poky/meta/lib/oeqa/runtime/cases/rpm.py b/poky/meta/lib/oeqa/runtime/cases/rpm.py
new file mode 100644
index 000000000..05b94c7b4
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/rpm.py
@@ -0,0 +1,142 @@
+import os
+import fnmatch
+import unittest
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.utils.path import findFile
+
+class RpmBasicTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ if cls.tc.td['PACKAGE_CLASSES'].split()[0] != 'package_rpm':
+ raise unittest.SkipTest('Tests require image to be built from rpm')
+
+ @OETestID(960)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_rpm_help(self):
+ status, output = self.target.run('rpm --help')
+ msg = 'status and output: %s and %s' % (status, output)
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(191)
+ @OETestDepends(['rpm.RpmBasicTest.test_rpm_help'])
+ def test_rpm_query(self):
+ status, output = self.target.run('rpm -q rpm')
+ msg = 'status and output: %s and %s' % (status, output)
+ self.assertEqual(status, 0, msg=msg)
+
+class RpmInstallRemoveTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ if cls.tc.td['PACKAGE_CLASSES'].split()[0] != 'package_rpm':
+ raise unittest.SkipTest('Tests require image to be built from rpm')
+
+ pkgarch = cls.td['TUNE_PKGARCH'].replace('-', '_')
+ rpmdir = os.path.join(cls.tc.td['DEPLOY_DIR'], 'rpm', pkgarch)
+ # Pick rpm-doc as a test file to get installed, because it's small
+ # and it will always be built for standard targets
+ rpm_doc = 'rpm-doc-*.%s.rpm' % pkgarch
+ for f in fnmatch.filter(os.listdir(rpmdir), rpm_doc):
+ test_file = os.path.join(rpmdir, f)
+ dst = '/tmp/rpm-doc.rpm'
+ cls.tc.target.copyTo(test_file, dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ dst = '/tmp/rpm-doc.rpm'
+ cls.tc.target.run('rm -f %s' % dst)
+
+ @OETestID(192)
+ @OETestDepends(['rpm.RpmBasicTest.test_rpm_help'])
+ def test_rpm_install(self):
+ status, output = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
+ msg = 'Failed to install rpm-doc package: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(194)
+ @OETestDepends(['rpm.RpmInstallRemoveTest.test_rpm_install'])
+ def test_rpm_remove(self):
+ status, output = self.target.run('rpm -e rpm-doc')
+ msg = 'Failed to remove rpm-doc package: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(1096)
+ @OETestDepends(['rpm.RpmBasicTest.test_rpm_query'])
+ def test_rpm_query_nonroot(self):
+
+ def set_up_test_user(u):
+ status, output = self.target.run('id -u %s' % u)
+ if status:
+ status, output = self.target.run('useradd %s' % u)
+ msg = 'Failed to create new user: %s' % output
+ self.assertTrue(status == 0, msg=msg)
+
+ def exec_as_test_user(u):
+ status, output = self.target.run('su -c id %s' % u)
+ msg = 'Failed to execute as new user'
+ self.assertTrue("({0})".format(u) in output, msg=msg)
+
+ status, output = self.target.run('su -c "rpm -qa" %s ' % u)
+ msg = 'status: %s. Cannot run rpm -qa: %s' % (status, output)
+ self.assertEqual(status, 0, msg=msg)
+
+ def unset_up_test_user(u):
+ status, output = self.target.run('userdel -r %s' % u)
+ msg = 'Failed to erase user: %s' % output
+ self.assertTrue(status == 0, msg=msg)
+
+ tuser = 'test1'
+
+ try:
+ set_up_test_user(tuser)
+ exec_as_test_user(tuser)
+ finally:
+ unset_up_test_user(tuser)
+
+ @OETestID(195)
+ @OETestDepends(['rpm.RpmInstallRemoveTest.test_rpm_remove'])
+ def test_check_rpm_install_removal_log_file_size(self):
+ """
+ Summary: Check that rpm writes into /var/log/messages
+ Expected: There should be some RPM prefixed entries in the above file.
+ Product: BSPs
+ Author: Alexandru Georgescu <alexandru.c.georgescu@intel.com>
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+ db_files_cmd = 'ls /var/lib/rpm/__db.*'
+ check_log_cmd = "grep RPM /var/log/messages | wc -l"
+
+ # Make sure that some database files are under /var/lib/rpm as '__db.xxx'
+ status, output = self.target.run(db_files_cmd)
+ msg = 'Failed to find database files under /var/lib/rpm/ as __db.xxx'
+ self.assertEqual(0, status, msg=msg)
+
+ # Remove the package just in case
+ self.target.run('rpm -e rpm-doc')
+
+ # Install/Remove a package 10 times
+ for i in range(10):
+ status, output = self.target.run('rpm -ivh /tmp/rpm-doc.rpm')
+ msg = 'Failed to install rpm-doc package. Reason: {}'.format(output)
+ self.assertEqual(0, status, msg=msg)
+
+ status, output = self.target.run('rpm -e rpm-doc')
+ msg = 'Failed to remove rpm-doc package. Reason: {}'.format(output)
+ self.assertEqual(0, status, msg=msg)
+
+ # if using systemd this should ensure all entries are flushed to /var
+ status, output = self.target.run("journalctl --sync")
+ # Get the amount of entries in the log file
+ status, output = self.target.run(check_log_cmd)
+ msg = 'Failed to get the final size of the log file.'
+ self.assertEqual(0, status, msg=msg)
+
+ # Check that there's enough of them
+ self.assertGreaterEqual(int(output), 80,
+ 'Could not find a sufficient number of RPM entries in /var/log/messages, found {} entries'.format(output))
diff --git a/poky/meta/lib/oeqa/runtime/cases/scanelf.py b/poky/meta/lib/oeqa/runtime/cases/scanelf.py
new file mode 100644
index 000000000..3ba1f78af
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/scanelf.py
@@ -0,0 +1,26 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class ScanelfTest(OERuntimeTestCase):
+ scancmd = 'scanelf --quiet --recursive --mount --ldpath --path'
+
+ @OETestID(966)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['pax-utils'])
+ def test_scanelf_textrel(self):
+ # print TEXTREL information
+ cmd = '%s --textrel' % self.scancmd
+ status, output = self.target.run(cmd)
+ msg = '\n'.join([cmd, output])
+ self.assertEqual(output.strip(), '', msg=msg)
+
+ @OETestID(967)
+ @OETestDepends(['scanelf.ScanelfTest.test_scanelf_textrel'])
+ def test_scanelf_rpath(self):
+ # print RPATH information
+ cmd = '%s --textrel --rpath' % self.scancmd
+ status, output = self.target.run(cmd)
+ msg = '\n'.join([cmd, output])
+ self.assertEqual(output.strip(), '', msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/scp.py b/poky/meta/lib/oeqa/runtime/cases/scp.py
new file mode 100644
index 000000000..f488a6175
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/scp.py
@@ -0,0 +1,33 @@
+import os
+from tempfile import mkstemp
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+
+class ScpTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
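+ # Seeking to 2**22 - 1 and writing a single byte creates a ~4 MiB sparse
+ # file, giving scp something non-trivial to transfer.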
+ cls.tmp_fd, cls.tmp_path = mkstemp()
+ with os.fdopen(cls.tmp_fd, 'w') as f:
+ f.seek(2 ** 22 - 1)
+ f.write(os.linesep)
+
+ @classmethod
+ def tearDownClass(cls):
+ os.remove(cls.tmp_path)
+
+ @OETestID(220)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_scp_file(self):
+ dst = '/tmp/test_scp_file'
+
+ (status, output) = self.target.copyTo(self.tmp_path, dst)
+ msg = 'File could not be copied. Output: %s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ (status, output) = self.target.run('ls -la %s' % dst)
+ self.assertEqual(status, 0, msg='SCP test failed')
+
+ self.target.run('rm %s' % dst)
diff --git a/poky/meta/lib/oeqa/runtime/cases/skeletoninit.py b/poky/meta/lib/oeqa/runtime/cases/skeletoninit.py
new file mode 100644
index 000000000..4fdcf033a
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/skeletoninit.py
@@ -0,0 +1,33 @@
+# This test should cover https://bugzilla.yoctoproject.org/tr_show_case.cgi?case_id=284
+# testcase. Image under test must have meta-skeleton layer in bblayers and
+# IMAGE_INSTALL_append = " service" in local.conf
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+
+class SkeletonBasicTest(OERuntimeTestCase):
+
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ @OEHasPackage(['service'])
+ @skipIfDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
+ 'Not appropriate for systemd image')
+ def test_skeleton_availability(self):
+ status, output = self.target.run('ls /etc/init.d/skeleton')
+ msg = 'skeleton init script not found. Output:\n%s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ status, output = self.target.run('ls /usr/sbin/skeleton-test')
+ msg = 'skeleton-test not found. Output:\n%s' % output
+ self.assertEqual(status, 0, msg=msg)
+
+ @OETestID(284)
+ @OETestDepends(['skeletoninit.SkeletonBasicTest.test_skeleton_availability'])
+ def test_skeleton_script(self):
+ output1 = self.target.run("/etc/init.d/skeleton start")[1]
+ cmd = '%s | grep [s]keleton-test' % self.tc.target_cmds['ps']
+ status, output2 = self.target.run(cmd)
+ msg = ('Skeleton script could not be started:'
+ '\n%s\n%s' % (output1, output2))
+ self.assertEqual(status, 0, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/ssh.py b/poky/meta/lib/oeqa/runtime/cases/ssh.py
new file mode 100644
index 000000000..eca167969
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/ssh.py
@@ -0,0 +1,15 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+
+class SSHTest(OERuntimeTestCase):
+
+ @OETestID(224)
+ @OETestDepends(['ping.PingTest.test_ping'])
+ def test_ssh(self):
+ (status, output) = self.target.run('uname -a')
+ self.assertEqual(status, 0, msg='SSH Test failed: %s' % output)
+ (status, output) = self.target.run('cat /etc/masterimage')
+ msg = "This isn't the right image - /etc/masterimage " \
+ "shouldn't be here %s" % output
+ self.assertEqual(status, 1, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/stap.py b/poky/meta/lib/oeqa/runtime/cases/stap.py
new file mode 100644
index 000000000..fc728bfc5
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/stap.py
@@ -0,0 +1,33 @@
+import os
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class StapTest(OERuntimeTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ src = os.path.join(cls.tc.runtime_files_dir, 'hello.stp')
+ dst = '/tmp/hello.stp'
+ cls.tc.target.copyTo(src, dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ files = '/tmp/hello.stp'
+ cls.tc.target.run('rm %s' % files)
+
+ @OETestID(1652)
+ @skipIfNotFeature('tools-profile',
+ 'Test requires tools-profile to be in IMAGE_FEATURES')
+ @OETestDepends(['kernelmodule.KernelModuleTest.test_kernel_module'])
+ def test_stap(self):
+ cmds = [
+ 'cd /usr/src/kernel && make scripts prepare',
+ 'cd /lib/modules/`uname -r` && (if [ ! -L build ]; then ln -s /usr/src/kernel build; fi)',
+ 'stap --disable-cache -DSTP_NO_VERREL_CHECK /tmp/hello.stp'
+ ]
+ for cmd in cmds:
+ status, output = self.target.run(cmd, 900)
+ self.assertEqual(status, 0, msg='\n'.join([cmd, output]))
diff --git a/poky/meta/lib/oeqa/runtime/cases/systemd.py b/poky/meta/lib/oeqa/runtime/cases/systemd.py
new file mode 100644
index 000000000..db69384c8
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/systemd.py
@@ -0,0 +1,181 @@
+import re
+import time
+
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfDataVar, skipIfNotDataVar
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class SystemdTest(OERuntimeTestCase):
+
+ def systemctl(self, action='', target='', expected=0, verbose=False):
+ command = 'systemctl %s %s' % (action, target)
+ status, output = self.target.run(command)
+ message = '\n'.join([command, output])
+ if status != expected and verbose:
+ cmd = 'systemctl status --full %s' % target
+ message += self.target.run(cmd)[1]
+ self.assertEqual(status, expected, message)
+ return output
+
+ #TODO: use pyjournalctl instead
+ def journalctl(self, args='', l_match_units=None):
+ """
+ Request for the journalctl output to the current target system
+
+ Arguments:
+ -args, an optional argument pass through argument
+ -l_match_units, an optional list of units to filter the output
+ Returns:
+ -string output of the journalctl command
+ Raises:
+ -AssertionError, on remote commands that fail
+ -ValueError, on a journalctl call with filtering by l_match_units that
+ returned no entries
+ """
+
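+ # e.g. self.journalctl('-o cat', ['avahi-daemon.service']) runs:
+ #   journalctl -o cat _SYSTEMD_UNIT=avahi-daemon.service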
+ query_units = ''
+ if l_match_units:
+ query_units = ['_SYSTEMD_UNIT=' + unit for unit in l_match_units]
+ query_units = ' '.join(query_units)
+ command = 'journalctl %s %s' % (args, query_units)
+ status, output = self.target.run(command)
+ if status:
+ raise AssertionError("Command '%s' returned non-zero exit "
+ 'code %d:\n%s' % (command, status, output))
+ if len(output) == 1 and "-- No entries --" in output:
+ raise ValueError('List of units to match: %s, returned no entries'
+ % l_match_units)
+ return output
+
+class SystemdBasicTests(SystemdTest):
+
+ def settle(self):
+ """
+ Block until systemd has finished activating any units being activated,
+ or until two minutes has elapsed.
+
+ Returns a tuple, either (True, '') if all units have finished
+ activating, or (False, message string) if there are still units
+ activating (generally, failing units that restart).
+ """
+ endtime = time.time() + (60 * 2)
+ while True:
+ status, output = self.target.run('systemctl --state=activating')
+ if "0 loaded units listed" in output:
+ return (True, '')
+ if time.time() >= endtime:
+ return (False, output)
+ time.sleep(10)
+
+ @skipIfNotFeature('systemd',
+ 'Test requires systemd to be in DISTRO_FEATURES')
+ @skipIfNotDataVar('VIRTUAL-RUNTIME_init_manager', 'systemd',
+ 'systemd is not the init manager for this image')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_systemd_basic(self):
+ self.systemctl('--version')
+
+ @OETestID(551)
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_list(self):
+ self.systemctl('list-unit-files')
+
+ @OETestID(550)
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_failed(self):
+ settled, output = self.settle()
+ msg = "Timed out waiting for systemd to settle:\n%s" % output
+ self.assertTrue(settled, msg=msg)
+
+ output = self.systemctl('list-units', '--failed')
+ match = re.search('0 loaded units listed', output)
+ if not match:
+ output += self.systemctl('status --full --failed')
+ self.assertTrue(match, msg='Some systemd units failed:\n%s' % output)
+
+
+class SystemdServiceTests(SystemdTest):
+
+ @OEHasPackage(['avahi-daemon'])
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_status(self):
+ self.systemctl('status --full', 'avahi-daemon.service')
+
+ @OETestID(695)
+ @OETestDepends(['systemd.SystemdServiceTests.test_systemd_status'])
+ def test_systemd_stop_start(self):
+ self.systemctl('stop', 'avahi-daemon.service')
+ self.systemctl('is-active', 'avahi-daemon.service',
+ expected=3, verbose=True)
+ self.systemctl('start', 'avahi-daemon.service')
+ self.systemctl('is-active', 'avahi-daemon.service', verbose=True)
+
+ @OETestID(696)
+ @OETestDepends(['systemd.SystemdServiceTests.test_systemd_status'])
+ def test_systemd_disable_enable(self):
+ self.systemctl('disable', 'avahi-daemon.service')
+ self.systemctl('is-enabled', 'avahi-daemon.service', expected=1)
+ self.systemctl('enable', 'avahi-daemon.service')
+ self.systemctl('is-enabled', 'avahi-daemon.service')
+
+class SystemdJournalTests(SystemdTest):
+
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_journal(self):
+ status, output = self.target.run('journalctl')
+ self.assertEqual(status, 0, output)
+
+ @OETestDepends(['systemd.SystemdBasicTests.test_systemd_basic'])
+ def test_systemd_boot_time(self, systemd_TimeoutStartSec=90):
+ """
+ Get the target boot time from journalctl and log it
+
+ Arguments:
+ -systemd_TimeoutStartSec, an optional argument containing systemd's
+ unit start timeout to compare against
+ """
+
+ # The expression chain that uniquely identifies the boot-time message.
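+ # A matching journal line looks like (example values):
+ #   Startup finished in 4.112s (kernel) + 13.871s (userspace) = 17.983s.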
+ expr_items = ['Startup finished', 'kernel', 'userspace', r'\.$']
+ try:
+ output = self.journalctl(args='-o cat --reverse')
+ except AssertionError:
+ self.fail('Error occurred while calling journalctl')
+ if not output:
+ self.fail('Error, unable to get startup time from systemd journal')
+
+ # Check for the regular expression items that match the startup time.
+ for line in output.split('\n'):
+ check_match = ''.join(re.findall('.*'.join(expr_items), line))
+ if check_match:
+ break
+ # Put the startup time in the test log
+ if check_match:
+ self.tc.logger.info('%s' % check_match)
+ else:
+ self.skipTest('Error at obtaining the boot time from journalctl')
+ boot_time_sec = 0
+
+ # Get the numeric values from the string and convert them to seconds
+ # same data will be placed in list and string for manipulation.
+ l_boot_time = check_match.split(' ')[-2:]
+ s_boot_time = ' '.join(l_boot_time)
+ try:
+ # Obtain the minutes it took to boot.
+ if l_boot_time[0].endswith('min') and l_boot_time[0][0].isdigit():
+ boot_time_min = s_boot_time.split('min')[0]
+ # Convert to seconds and accumulate it.
+ boot_time_sec += int(boot_time_min) * 60
+ # Obtain the seconds it took to boot and accumulate.
+ boot_time_sec += float(l_boot_time[1].split('s')[0])
+ except ValueError:
+ self.skipTest('Error when parsing time from boot string')
+
+ # Assert the target boot time against systemd's unit start timeout.
+ if boot_time_sec > systemd_TimeoutStartSec:
+ msg = ("Target boot time %s exceeds systemd's TimeoutStartSec %s"
+ % (boot_time_sec, systemd_TimeoutStartSec))
+ self.tc.logger.info(msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/x32lib.py b/poky/meta/lib/oeqa/runtime/cases/x32lib.py
new file mode 100644
index 000000000..8da0154e7
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/x32lib.py
@@ -0,0 +1,19 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotInDataVar
+
+class X32libTest(OERuntimeTestCase):
+
+ @skipIfNotInDataVar('DEFAULTTUNE', 'x86-64-x32',
+ 'DEFAULTTUNE is not set to x86-64-x32')
+ @OETestID(281)
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_x32_file(self):
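+ # An x32 binary reports an ELF32 class together with an X86-64 machine
+ # type in 'readelf -h'; both greps below must succeed.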
+ cmd = 'readelf -h /bin/ls | grep Class | grep ELF32'
+ status1 = self.target.run(cmd)[0]
+ cmd = 'readelf -h /bin/ls | grep Machine | grep X86-64'
+ status2 = self.target.run(cmd)[0]
+ msg = ("/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" %
+ self.target.run("readelf -h /bin/ls")[1])
+ self.assertTrue(status1 == 0 and status2 == 0, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/cases/xorg.py b/poky/meta/lib/oeqa/runtime/cases/xorg.py
new file mode 100644
index 000000000..2124813e3
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/xorg.py
@@ -0,0 +1,17 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.core.decorator.data import skipIfNotFeature
+
+class XorgTest(OERuntimeTestCase):
+
+ @OETestID(1151)
+ @skipIfNotFeature('x11-base',
+ 'Test requires x11 to be in IMAGE_FEATURES')
+ @OETestDepends(['ssh.SSHTest.test_ssh'])
+ def test_xorg_running(self):
+ cmd ='%s | grep -v xinit | grep [X]org' % self.tc.target_cmds['ps']
+ status, output = self.target.run(cmd)
+ msg = ('Xorg does not appear to be running. ps output:\n%s' %
+ self.target.run(self.tc.target_cmds['ps'])[1])
+ self.assertEqual(status, 0, msg=msg)
diff --git a/poky/meta/lib/oeqa/runtime/context.py b/poky/meta/lib/oeqa/runtime/context.py
new file mode 100644
index 000000000..0294003fc
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/context.py
@@ -0,0 +1,226 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+
+from oeqa.core.context import OETestContext, OETestContextExecutor
+from oeqa.core.target.ssh import OESSHTarget
+from oeqa.core.target.qemu import OEQemuTarget
+from oeqa.utils.dump import HostDumper
+
+from oeqa.runtime.loader import OERuntimeTestLoader
+
+class OERuntimeTestContext(OETestContext):
+ loaderClass = OERuntimeTestLoader
+ runtime_files_dir = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), "files")
+
+ def __init__(self, td, logger, target,
+ host_dumper, image_packages, extract_dir):
+ super(OERuntimeTestContext, self).__init__(td, logger)
+
+ self.target = target
+ self.image_packages = image_packages
+ self.host_dumper = host_dumper
+ self.extract_dir = extract_dir
+ self._set_target_cmds()
+
+ def _set_target_cmds(self):
+ self.target_cmds = {}
+
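+ # Default to plain 'ps' (works with busybox); when procps is installed,
+ # its ps supports -ef for a full-format listing.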
+ self.target_cmds['ps'] = 'ps'
+ if 'procps' in self.image_packages:
+ self.target_cmds['ps'] = self.target_cmds['ps'] + ' -ef'
+
+class OERuntimeTestContextExecutor(OETestContextExecutor):
+ _context_class = OERuntimeTestContext
+
+ name = 'runtime'
+ help = 'runtime test component'
+ description = 'executes runtime tests over targets'
+
+ default_cases = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'cases')
+ default_data = None
+ default_test_data = 'data/testdata.json'
+ default_tests = ''
+
+ default_target_type = 'simpleremote'
+ default_manifest = 'data/manifest'
+ default_server_ip = '192.168.7.1'
+ default_target_ip = '192.168.7.2'
+ default_host_dumper_dir = '/tmp/oe-saved-tests'
+ default_extract_dir = 'packages/extracted'
+
+ def register_commands(self, logger, subparsers):
+ super(OERuntimeTestContextExecutor, self).register_commands(logger, subparsers)
+
+ runtime_group = self.parser.add_argument_group('runtime options')
+
+ runtime_group.add_argument('--target-type', action='store',
+ default=self.default_target_type, choices=['simpleremote', 'qemu'],
+ help="Target type of device under test, default: %s" \
+ % self.default_target_type)
+ runtime_group.add_argument('--target-ip', action='store',
+ default=self.default_target_ip,
+ help="IP address of device under test, default: %s" \
+ % self.default_target_ip)
+ runtime_group.add_argument('--server-ip', action='store',
+ default=self.default_server_ip,
+ help="IP address of the test server on the host side, default: %s" \
+ % self.default_server_ip)
+
+ runtime_group.add_argument('--host-dumper-dir', action='store',
+ default=self.default_host_dumper_dir,
+ help="Directory where host status is dumped, if tests fails, default: %s" \
+ % self.default_host_dumper_dir)
+
+ runtime_group.add_argument('--packages-manifest', action='store',
+ default=self.default_manifest,
+ help="Package manifest of the image under testi, default: %s" \
+ % self.default_manifest)
+
+ runtime_group.add_argument('--extract-dir', action='store',
+ default=self.default_extract_dir,
+ help='Directory where extracted packages reside, default: %s' \
+ % self.default_extract_dir)
+
+ runtime_group.add_argument('--qemu-boot', action='store',
+ help="Qemu boot configuration, only needed when target_type is QEMU.")
+
+ @staticmethod
+ def getTarget(target_type, logger, target_ip, server_ip, **kwargs):
+ target = None
+
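+ # target_ip may optionally include an ssh port in the form 'IP:PORT'.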
+ if target_ip:
+ target_ip_port = target_ip.split(':')
+ if len(target_ip_port) == 2:
+ target_ip = target_ip_port[0]
+ kwargs['port'] = target_ip_port[1]
+
+ if target_type == 'simpleremote':
+ target = OESSHTarget(logger, target_ip, server_ip, **kwargs)
+ elif target_type == 'qemu':
+ target = OEQemuTarget(logger, target_ip, server_ip, **kwargs)
+ else:
+ # XXX: This code uses the old naming convention for controllers and
+ # targets; the idea is to keep just targets, as the controller was
+ # usually only a thin wrapper.
+ # XXX: This code imports modules from the lib/oeqa/controllers
+ # directory and treats them as controllers; it would be less
+ # error-prone to use introspection to load such modules.
+ # XXX: Don't base your targets on this code; it will be refactored
+ # in the near future.
+ # Custom target module loading
+ try:
+ target_modules_path = kwargs.get('target_modules_path', '')
+ controller = OERuntimeTestContextExecutor.getControllerModule(target_type, target_modules_path)
+ target = controller(logger, target_ip, server_ip, **kwargs)
+ except ImportError as e:
+ raise TypeError("Failed to import %s from available controller modules" % target_type)
+
+ return target
+
+ # Search the oeqa.controllers module directory for a controller
+ # corresponding to the given target name and return it.
+ # Raises AttributeError if not found.
+ # Raises ImportError if a provided module cannot be imported.
+ @staticmethod
+ def getControllerModule(target, target_modules_path):
+ controllerslist = OERuntimeTestContextExecutor._getControllerModulenames(target_modules_path)
+ controller = OERuntimeTestContextExecutor._loadControllerFromName(target, controllerslist)
+ return controller
+
+ # Return a list of all python modules in lib/oeqa/controllers for each
+ # layer in bbpath
+ @staticmethod
+ def _getControllerModulenames(target_modules_path):
+
+ controllerslist = []
+
+ def add_controller_list(path):
+ if not os.path.exists(os.path.join(path, '__init__.py')):
+ raise OSError('Controllers directory %s exists but is missing __init__.py' % path)
+ files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
+ for f in files:
+ module = 'oeqa.controllers.' + f[:-3]
+ if module not in controllerslist:
+ controllerslist.append(module)
+ else:
+ raise RuntimeError("Duplicate controller module found for %s. Layers should create unique controller module names" % module)
+
+ extpath = target_modules_path.split(':')
+ for p in extpath:
+ controllerpath = os.path.join(p, 'lib', 'oeqa', 'controllers')
+ if os.path.exists(controllerpath):
+ add_controller_list(controllerpath)
+ return controllerslist
+
+ # Search for and return a controller from given target name and
+ # set of module names.
+ # Raise AttributeError if not found.
+ # Raise ImportError if a provided module cannot be imported.
+ @staticmethod
+ def _loadControllerFromName(target, modulenames):
+ for name in modulenames:
+ obj = OERuntimeTestContextExecutor._loadControllerFromModule(target, name)
+ if obj:
+ return obj
+ raise AttributeError("Unable to load {0} from available modules: {1}".format(target, str(modulenames)))
+
+ # Search for and return a controller or None from given module name
+ @staticmethod
+ def _loadControllerFromModule(target, modulename):
+ obj = None
+ # import module, allowing it to raise import exception
+ try:
+ module = __import__(modulename, globals(), locals(), [target])
+ except Exception as e:
+ return obj
+ # look for target class in the module, catching any exceptions as it
+ # is valid that a module may not have the target class.
+ try:
+ obj = getattr(module, target)
+ except AttributeError:
+ obj = None
+ return obj
+
+ @staticmethod
+ def readPackagesManifest(manifest):
+ if not manifest or not os.path.exists(manifest):
+ raise OSError("Manifest file not exists: %s" % manifest)
+
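+ # Each manifest line names one package; the first whitespace-separated
+ # field is the package name, and lines starting with '#' are comments.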
+ image_packages = set()
+ with open(manifest, 'r') as f:
+ for line in f.readlines():
+ line = line.strip()
+ if line and not line.startswith("#"):
+ image_packages.add(line.split()[0])
+
+ return image_packages
+
+ @staticmethod
+ def getHostDumper(cmds, directory):
+ return HostDumper(cmds, directory)
+
+ def _process_args(self, logger, args):
+ if not args.packages_manifest:
+ raise TypeError('Manifest file not provided')
+
+ super(OERuntimeTestContextExecutor, self)._process_args(logger, args)
+
+ target_kwargs = {}
+ target_kwargs['qemuboot'] = args.qemu_boot
+
+ self.tc_kwargs['init']['target'] = \
+ OERuntimeTestContextExecutor.getTarget(args.target_type,
+ None, args.target_ip, args.server_ip, **target_kwargs)
+ self.tc_kwargs['init']['host_dumper'] = \
+ OERuntimeTestContextExecutor.getHostDumper(None,
+ args.host_dumper_dir)
+ self.tc_kwargs['init']['image_packages'] = \
+ OERuntimeTestContextExecutor.readPackagesManifest(
+ args.packages_manifest)
+ self.tc_kwargs['init']['extract_dir'] = args.extract_dir
+
+_executor_class = OERuntimeTestContextExecutor
diff --git a/poky/meta/lib/oeqa/runtime/decorator/package.py b/poky/meta/lib/oeqa/runtime/decorator/package.py
new file mode 100644
index 000000000..aa6ecb68f
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/decorator/package.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.decorator import OETestDecorator, registerDecorator
+from oeqa.core.utils.misc import strToSet
+
+@registerDecorator
+class OEHasPackage(OETestDecorator):
+ """
+ Checks if image has packages (un)installed.
+
+ The argument must be a string, set, or list of packages that must be
+ installed or not present in the image.
+
+ To specify that a package must not be present in the image, prefix
+ its name with an exclamation point ('!').
+
+ If a test depends on pkg1 or pkg2 you need to use:
+ @OEHasPackage({'pkg1', 'pkg2'})
+
+ If a test depends on pkg1 and pkg2 you need to use:
+ @OEHasPackage('pkg1')
+ @OEHasPackage('pkg2')
+
+ If a test depends on pkg1 but pkg2 must not be present, use:
+ @OEHasPackage({'pkg1', '!pkg2'})
+ """
+
+ attrs = ('need_pkgs',)
+
+ def setUpDecorator(self):
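+ # Split the requested packages into required and forbidden ('!'-prefixed) sets.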
+ need_pkgs = set()
+ unneed_pkgs = set()
+ pkgs = strToSet(self.need_pkgs)
+ for pkg in pkgs:
+ if pkg.startswith('!'):
+ unneed_pkgs.add(pkg[1:])
+ else:
+ need_pkgs.add(pkg)
+
+ if unneed_pkgs:
+ msg = 'Checking if %s is not installed' % ', '.join(unneed_pkgs)
+ self.logger.debug(msg)
+ if not self.case.tc.image_packages.isdisjoint(unneed_pkgs):
+ msg = "Test can't run with %s installed" % ', or'.join(unneed_pkgs)
+ self.case.skipTest(msg)
+
+ if need_pkgs:
+ msg = 'Checking if at least one of %s is installed' % ', '.join(need_pkgs)
+ self.logger.debug(msg)
+ if self.case.tc.image_packages.isdisjoint(need_pkgs):
+ msg = "Test requires %s to be installed" % ', or'.join(need_pkgs)
+ self.case.skipTest(msg)
diff --git a/poky/meta/lib/oeqa/runtime/files/hello.stp b/poky/meta/lib/oeqa/runtime/files/hello.stp
new file mode 100644
index 000000000..367714716
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/files/hello.stp
@@ -0,0 +1 @@
+probe oneshot { println("hello world") }
diff --git a/poky/meta/lib/oeqa/runtime/files/hellomod.c b/poky/meta/lib/oeqa/runtime/files/hellomod.c
new file mode 100644
index 000000000..a383397e9
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/files/hellomod.c
@@ -0,0 +1,19 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+static int __init hello_init(void)
+{
+ printk(KERN_INFO "Hello world!\n");
+ return 0;
+}
+
+static void __exit hello_cleanup(void)
+{
+ printk(KERN_INFO "Cleaning up hellomod.\n");
+}
+
+module_init(hello_init);
+module_exit(hello_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/poky/meta/lib/oeqa/runtime/files/hellomod_makefile b/poky/meta/lib/oeqa/runtime/files/hellomod_makefile
new file mode 100644
index 000000000..b92d5c8fe
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/files/hellomod_makefile
@@ -0,0 +1,8 @@
+obj-m := hellomod.o
+KDIR := /usr/src/kernel
+
+all:
+ $(MAKE) -C $(KDIR) M=$(PWD) modules
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(PWD) clean
diff --git a/poky/meta/lib/oeqa/runtime/files/testmakefile b/poky/meta/lib/oeqa/runtime/files/testmakefile
new file mode 100644
index 000000000..ca1844e93
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/files/testmakefile
@@ -0,0 +1,5 @@
+test: test.o
+ gcc -o test test.o -lm
+test.o: test.c
+ gcc -c test.c
+
diff --git a/poky/meta/lib/oeqa/runtime/loader.py b/poky/meta/lib/oeqa/runtime/loader.py
new file mode 100644
index 000000000..041ef976e
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/loader.py
@@ -0,0 +1,16 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.loader import OETestLoader
+from oeqa.runtime.case import OERuntimeTestCase
+
+class OERuntimeTestLoader(OETestLoader):
+ caseClass = OERuntimeTestCase
+
+ def _getTestCase(self, testCaseClass, tcName):
+ case = super(OERuntimeTestLoader, self)._getTestCase(testCaseClass, tcName)
+
+ # Adds custom attributes to the OERuntimeTestCase
+ setattr(case, 'target', self.tc.target)
+
+ return case
diff --git a/poky/meta/lib/oeqa/runtime/utils/__init__.py b/poky/meta/lib/oeqa/runtime/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/utils/__init__.py
diff --git a/poky/meta/lib/oeqa/runtime/utils/targetbuildproject.py b/poky/meta/lib/oeqa/runtime/utils/targetbuildproject.py
new file mode 100644
index 000000000..5af55d736
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/utils/targetbuildproject.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.utils.buildproject import BuildProject
+
+class TargetBuildProject(BuildProject):
+
+ def __init__(self, target, uri, foldername=None, dl_dir=None):
+ self.target = target
+ self.targetdir = "~/"
+ BuildProject.__init__(self, uri, foldername, dl_dir=dl_dir)
+
+ def download_archive(self):
+ self._download_archive()
+
+ status, output = self.target.copyTo(self.localarchive, self.targetdir)
+ if status:
+ raise Exception('Failed to copy archive to target, '
+ 'output: %s' % output)
+
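+ # self.targetdir ends with '/', so '%s%s' forms the archive's full path on the target.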
+ cmd = 'tar xf %s%s -C %s' % (self.targetdir,
+ self.archive,
+ self.targetdir)
+ status, output = self.target.run(cmd)
+ if status:
+ raise Exception('Failed to extract archive, '
+ 'output: %s' % output)
+
+ # Change targetdir to project folder
+ self.targetdir = self.targetdir + self.fname
+
+ # The timeout parameter of target.run is set to 0
+ # to make the ssh command run with no timeout.
+ def _run(self, cmd):
+ ret = self.target.run(cmd, 0)
+ msg = "Command %s failed with exit code %s: %s" % (cmd, ret[0], ret[1])
+ if ret[0] != 0:
+ raise Exception(msg)
+ return ret[0]
diff --git a/poky/meta/lib/oeqa/sdk/__init__.py b/poky/meta/lib/oeqa/sdk/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/__init__.py
diff --git a/poky/meta/lib/oeqa/sdk/case.py b/poky/meta/lib/oeqa/sdk/case.py
new file mode 100644
index 000000000..963aa8d35
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/case.py
@@ -0,0 +1,12 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import subprocess
+
+from oeqa.core.case import OETestCase
+
+class OESDKTestCase(OETestCase):
+ def _run(self, cmd):
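+ # Source the SDK environment script, then run the command in that same shell.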
+ return subprocess.check_output(". %s > /dev/null; %s;" % \
+ (self.tc.sdk_env, cmd), shell=True,
+ stderr=subprocess.STDOUT, universal_newlines=True)
diff --git a/poky/meta/lib/oeqa/sdk/cases/buildcpio.py b/poky/meta/lib/oeqa/sdk/cases/buildcpio.py
new file mode 100644
index 000000000..333dc7c22
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/cases/buildcpio.py
@@ -0,0 +1,33 @@
+import unittest
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.sdk.utils.sdkbuildproject import SDKBuildProject
+
+class BuildCpioTest(OESDKTestCase):
+ td_vars = ['DATETIME']
+
+ @classmethod
+ def setUpClass(self):
+ dl_dir = self.td.get('DL_DIR', None)
+
+ self.project = SDKBuildProject(self.tc.sdk_dir + "/cpio/", self.tc.sdk_env,
+ "https://ftp.gnu.org/gnu/cpio/cpio-2.12.tar.gz",
+ self.tc.sdk_dir, self.td['DATETIME'], dl_dir=dl_dir)
+ self.project.download_archive()
+
+ machine = self.td.get("MACHINE")
+ if not self.tc.hasHostPackage("packagegroup-cross-canadian-%s" % machine):
+ raise unittest.SkipTest("SDK doesn't contain a cross-canadian toolchain")
+
+ def test_cpio(self):
+ self.assertEqual(self.project.run_configure(), 0,
+ msg="Running configure failed")
+
+ self.assertEqual(self.project.run_make(), 0,
+ msg="Running make failed")
+
+ self.assertEqual(self.project.run_install(), 0,
+ msg="Running make install failed")
+
+ @classmethod
+ def tearDownClass(self):
+ self.project.clean()
diff --git a/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py
new file mode 100644
index 000000000..780afccc7
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py
@@ -0,0 +1,35 @@
+import unittest
+
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.sdk.utils.sdkbuildproject import SDKBuildProject
+
+class GalculatorTest(OESDKTestCase):
+ td_vars = ['DATETIME']
+
+ @classmethod
+ def setUpClass(self):
+ if not (self.tc.hasTargetPackage(r"gtk\+3") or \
+ self.tc.hasTargetPackage("libgtk-3.0")):
+ raise unittest.SkipTest("GalculatorTest class: SDK doesn't support gtk+3")
+
+ def test_galculator(self):
+ dl_dir = self.td.get('DL_DIR', None)
+ project = None
+ try:
+ project = SDKBuildProject(self.tc.sdk_dir + "/galculator/",
+ self.tc.sdk_env,
+ "http://galculator.mnim.org/downloads/galculator-2.1.4.tar.bz2",
+ self.tc.sdk_dir, self.td['DATETIME'], dl_dir=dl_dir)
+
+ project.download_archive()
+
+ # regenerate configure to get support for --with-libtool-sysroot
+ legacy_preconf = ("autoreconf -i -f -I ${OECORE_TARGET_SYSROOT}/usr/share/aclocal -I m4;")
+
+ self.assertEqual(project.run_configure(extra_cmds=legacy_preconf),
+ 0, msg="Running configure failed")
+
+ self.assertEqual(project.run_make(), 0,
+ msg="Running make failed")
+ finally:
+ project.clean()
diff --git a/poky/meta/lib/oeqa/sdk/cases/buildlzip.py b/poky/meta/lib/oeqa/sdk/cases/buildlzip.py
new file mode 100644
index 000000000..3a89ce862
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/cases/buildlzip.py
@@ -0,0 +1,36 @@
+import unittest
+from oeqa.sdk.case import OESDKTestCase
+from oeqa.sdk.utils.sdkbuildproject import SDKBuildProject
+
+
+class BuildLzipTest(OESDKTestCase):
+ td_vars = ['DATETIME']
+
+ @classmethod
+ def setUpClass(self):
+ dl_dir = self.td.get('DL_DIR', None)
+
+ self.project = SDKBuildProject(self.tc.sdk_dir + "/lzip/", self.tc.sdk_env,
+ "http://downloads.yoctoproject.org/mirror/sources/lzip-1.19.tar.gz",
+ self.tc.sdk_dir, self.td['DATETIME'], dl_dir=dl_dir)
+ self.project.download_archive()
+
+ machine = self.td.get("MACHINE")
+
+ if not (self.tc.hasTargetPackage("packagegroup-cross-canadian-%s" % machine) or
+ self.tc.hasTargetPackage("gcc")):
+ raise unittest.SkipTest("SDK doesn't contain a cross-canadian toolchain")
+
+ def test_lzip(self):
+ self.assertEqual(self.project.run_configure(), 0,
+ msg="Running configure failed")
+
+ self.assertEqual(self.project.run_make(), 0,
+ msg="Running make failed")
+
+ self.assertEqual(self.project.run_install(), 0,
+ msg="Running make install failed")
+
+ @classmethod
+ def tearDownClass(self):
+ self.project.clean()
diff --git a/poky/meta/lib/oeqa/sdk/cases/gcc.py b/poky/meta/lib/oeqa/sdk/cases/gcc.py
new file mode 100644
index 000000000..d11f4b63f
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/cases/gcc.py
@@ -0,0 +1,43 @@
+import os
+import shutil
+import unittest
+
+from oeqa.core.utils.path import remove_safe
+from oeqa.sdk.case import OESDKTestCase
+
+class GccCompileTest(OESDKTestCase):
+ td_vars = ['MACHINE']
+
+ @classmethod
+ def setUpClass(self):
+ files = {'test.c' : self.tc.files_dir, 'test.cpp' : self.tc.files_dir,
+ 'testsdkmakefile' : self.tc.sdk_files_dir}
+ for f in files:
+ shutil.copyfile(os.path.join(files[f], f),
+ os.path.join(self.tc.sdk_dir, f))
+
+ def setUp(self):
+ machine = self.td.get("MACHINE")
+ if not (self.tc.hasTargetPackage("packagegroup-cross-canadian-%s" % machine) or
+ self.tc.hasTargetPackage("gcc")):
+ raise unittest.SkipTest("GccCompileTest class: SDK doesn't contain a cross-canadian toolchain")
+
+ def test_gcc_compile(self):
+ self._run('$CC %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
+
+ def test_gpp_compile(self):
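+ # Intentionally compiles the C source with $CXX; g++ builds .c input as C++.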
+ self._run('$CXX %s/test.c -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
+
+ def test_gpp2_compile(self):
+ self._run('$CXX %s/test.cpp -o %s/test -lm' % (self.tc.sdk_dir, self.tc.sdk_dir))
+
+ def test_make(self):
+ self._run('cd %s; make -f testsdkmakefile' % self.tc.sdk_dir)
+
+ @classmethod
+ def tearDownClass(self):
+ files = [os.path.join(self.tc.sdk_dir, f) \
+ for f in ['test.c', 'test.cpp', 'test.o', 'test',
+ 'testsdkmakefile']]
+ for f in files:
+ remove_safe(f)
diff --git a/poky/meta/lib/oeqa/sdk/cases/perl.py b/poky/meta/lib/oeqa/sdk/cases/perl.py
new file mode 100644
index 000000000..808567811
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/cases/perl.py
@@ -0,0 +1,28 @@
+import os
+import shutil
+import unittest
+
+from oeqa.core.utils.path import remove_safe
+from oeqa.sdk.case import OESDKTestCase
+
+class PerlTest(OESDKTestCase):
+ @classmethod
+ def setUpClass(self):
+ if not (self.tc.hasHostPackage("nativesdk-perl") or
+ self.tc.hasHostPackage("perl-native")):
+ raise unittest.SkipTest("No perl package in the SDK")
+
+ for f in ['test.pl']:
+ shutil.copyfile(os.path.join(self.tc.files_dir, f),
+ os.path.join(self.tc.sdk_dir, f))
+ self.testfile = os.path.join(self.tc.sdk_dir, "test.pl")
+
+ def test_perl_exists(self):
+ self._run('which perl')
+
+ def test_perl_works(self):
+ self._run('perl %s' % self.testfile)
+
+ @classmethod
+ def tearDownClass(self):
+ remove_safe(self.testfile)
diff --git a/poky/meta/lib/oeqa/sdk/cases/python.py b/poky/meta/lib/oeqa/sdk/cases/python.py
new file mode 100644
index 000000000..72dfcc72b
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/cases/python.py
@@ -0,0 +1,32 @@
+import os
+import shutil
+import unittest
+
+from oeqa.core.utils.path import remove_safe
+from oeqa.sdk.case import OESDKTestCase
+
+class PythonTest(OESDKTestCase):
+ @classmethod
+ def setUpClass(self):
+ if not (self.tc.hasHostPackage("nativesdk-python") or
+ self.tc.hasHostPackage("python-native")):
+ raise unittest.SkipTest("No python package in the SDK")
+
+ for f in ['test.py']:
+ shutil.copyfile(os.path.join(self.tc.files_dir, f),
+ os.path.join(self.tc.sdk_dir, f))
+
+ def test_python_exists(self):
+ self._run('which python')
+
+ def test_python_stdout(self):
+ output = self._run('python %s/test.py' % self.tc.sdk_dir)
+ self.assertEqual(output.strip(), "the value of a is 0.01", msg="Incorrect output: %s" % output)
+
+ def test_python_testfile(self):
+ self._run('ls /tmp/testfile.python')
+
+ @classmethod
+ def tearDownClass(self):
+ remove_safe("%s/test.py" % self.tc.sdk_dir)
+ remove_safe("/tmp/testfile.python")
diff --git a/poky/meta/lib/oeqa/sdk/context.py b/poky/meta/lib/oeqa/sdk/context.py
new file mode 100644
index 000000000..b3d7c7518
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/context.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import glob
+import re
+
+from oeqa.core.context import OETestContextExecutor
+from oeqa.core.threaded import OETestContextThreaded
+
+class OESDKTestContext(OETestContextThreaded):
+ sdk_files_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
+
+ def __init__(self, td=None, logger=None, sdk_dir=None, sdk_env=None,
+ target_pkg_manifest=None, host_pkg_manifest=None):
+ super(OESDKTestContext, self).__init__(td, logger)
+
+ self.sdk_dir = sdk_dir
+ self.sdk_env = sdk_env
+ self.target_pkg_manifest = target_pkg_manifest
+ self.host_pkg_manifest = host_pkg_manifest
+
+ def _hasPackage(self, manifest, pkg):
+ for host_pkg in manifest.keys():
+ if re.search(pkg, host_pkg):
+ return True
+ return False
+
+ def hasHostPackage(self, pkg):
+ return self._hasPackage(self.host_pkg_manifest, pkg)
+
+ def hasTargetPackage(self, pkg):
+ return self._hasPackage(self.target_pkg_manifest, pkg)
+
+class OESDKTestContextExecutor(OETestContextExecutor):
+ _context_class = OESDKTestContext
+
+ name = 'sdk'
+ help = 'sdk test component'
+ description = 'executes sdk tests'
+
+ default_cases = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'cases')]
+ default_test_data = None
+
+ def register_commands(self, logger, subparsers):
+ super(OESDKTestContextExecutor, self).register_commands(logger, subparsers)
+
+ sdk_group = self.parser.add_argument_group('sdk options')
+ sdk_group.add_argument('--sdk-env', action='store',
+ help='sdk environment')
+ sdk_group.add_argument('--target-manifest', action='store',
+ help='sdk target manifest')
+ sdk_group.add_argument('--host-manifest', action='store',
+ help='sdk host manifest')
+
+ sdk_dgroup = self.parser.add_argument_group('sdk display options')
+ sdk_dgroup.add_argument('--list-sdk-env', action='store_true',
+ default=False, help='sdk list available environment')
+
+ # XXX this option is required, but argparse_oe has a bug handling
+ # required options: it seems not to keep track of already-parsed
+ # options
+ sdk_rgroup = self.parser.add_argument_group('sdk required options')
+ sdk_rgroup.add_argument('--sdk-dir', required=False, action='store',
+ help='sdk installed directory')
+
+ @staticmethod
+ def _load_manifest(manifest):
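+ # Each manifest line is '<package> <arch> <version>'; map package name to (version, arch).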
+ pkg_manifest = {}
+ if manifest:
+ with open(manifest) as f:
+ for line in f:
+ (pkg, arch, version) = line.strip().split()
+ pkg_manifest[pkg] = (version, arch)
+
+ return pkg_manifest
+
+ def _process_args(self, logger, args):
+ super(OESDKTestContextExecutor, self)._process_args(logger, args)
+
+ self.tc_kwargs['init']['sdk_dir'] = args.sdk_dir
+ self.tc_kwargs['init']['sdk_env'] = self.sdk_env
+ self.tc_kwargs['init']['target_pkg_manifest'] = \
+ OESDKTestContextExecutor._load_manifest(args.target_manifest)
+ self.tc_kwargs['init']['host_pkg_manifest'] = \
+ OESDKTestContextExecutor._load_manifest(args.host_manifest)
+
+ @staticmethod
+ def _get_sdk_environs(sdk_dir):
+ sdk_env = {}
+
+ environ_pattern = sdk_dir + '/environment-setup-*'
+ full_sdk_env = glob.glob(environ_pattern)
+ for env in full_sdk_env:
+ m = re.search('environment-setup-(.*)', env)
+ if m:
+ sdk_env[m.group(1)] = env
+
+ return sdk_env
+
+ def _display_sdk_envs(self, log, args, sdk_envs):
+ log("Available SDK environments at directory %s:" \
+ % args.sdk_dir)
+ log("")
+ for env in sdk_envs:
+ log(env)
+
+ def run(self, logger, args):
+ import argparse_oe
+
+ if not args.sdk_dir:
+ raise argparse_oe.ArgumentUsageError("No SDK directory "\
+ "specified please do, --sdk-dir SDK_DIR", self.name)
+
+ sdk_envs = OESDKTestContextExecutor._get_sdk_environs(args.sdk_dir)
+ if not sdk_envs:
+ raise argparse_oe.ArgumentUsageError("No available SDK "\
+ "enviroments found at %s" % args.sdk_dir, self.name)
+
+ if args.list_sdk_env:
+ self._display_sdk_envs(logger.info, args, sdk_envs)
+ sys.exit(0)
+
+ if args.sdk_env not in sdk_envs:
+ self._display_sdk_envs(logger.error, args, sdk_envs)
+ raise argparse_oe.ArgumentUsageError("No valid SDK "\
+ "environment (%s) specified" % args.sdk_env, self.name)
+
+ self.sdk_env = sdk_envs[args.sdk_env]
+ return super(OESDKTestContextExecutor, self).run(logger, args)
+
+_executor_class = OESDKTestContextExecutor
diff --git a/poky/meta/lib/oeqa/sdk/files/testsdkmakefile b/poky/meta/lib/oeqa/sdk/files/testsdkmakefile
new file mode 100644
index 000000000..fb05f822f
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/files/testsdkmakefile
@@ -0,0 +1,5 @@
+test: test.o
+ $(CC) -o test test.o -lm
+test.o: test.c
+ $(CC) -c test.c
+
diff --git a/poky/meta/lib/oeqa/sdk/utils/__init__.py b/poky/meta/lib/oeqa/sdk/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/utils/__init__.py
diff --git a/poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py b/poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py
new file mode 100644
index 000000000..4e251142d
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdk/utils/sdkbuildproject.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import subprocess
+
+from oeqa.utils.buildproject import BuildProject
+
+class SDKBuildProject(BuildProject):
+ def __init__(self, testpath, sdkenv, uri, testlogdir, builddatetime,
+ foldername=None, dl_dir=None):
+ self.sdkenv = sdkenv
+ self.testdir = testpath
+ self.targetdir = testpath
+ os.makedirs(testpath, exist_ok=True)
+ self.datetime = builddatetime
+ self.testlogdir = testlogdir
+ os.makedirs(self.testlogdir, exist_ok=True)
+ self.logfile = os.path.join(self.testlogdir, "sdk_target_log.%s" % self.datetime)
+ BuildProject.__init__(self, uri, foldername, tmpdir=testpath, dl_dir=dl_dir)
+
+ def download_archive(self):
+
+ self._download_archive()
+
+ cmd = 'tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir)
+ subprocess.check_output(cmd, shell=True)
+
+ #Change targetdir to project folder
+ self.targetdir = os.path.join(self.targetdir, self.fname)
+
+ def run_configure(self, configure_args='', extra_cmds=''):
+ return super(SDKBuildProject, self).run_configure(configure_args=(configure_args or '$CONFIGURE_FLAGS'), extra_cmds=extra_cmds)
+
+ def run_install(self, install_args=''):
+ return super(SDKBuildProject, self).run_install(install_args=(install_args or "DESTDIR=%s/../install" % self.targetdir))
+
+ def log(self, msg):
+ if self.logfile:
+ with open(self.logfile, "a") as f:
+ f.write("%s\n" % msg)
+
+ def _run(self, cmd):
+ self.log("Running . %s; " % self.sdkenv + cmd)
+ return subprocess.call(". %s; " % self.sdkenv + cmd, shell=True)
diff --git a/poky/meta/lib/oeqa/sdkext/__init__.py b/poky/meta/lib/oeqa/sdkext/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/__init__.py
diff --git a/poky/meta/lib/oeqa/sdkext/case.py b/poky/meta/lib/oeqa/sdkext/case.py
new file mode 100644
index 000000000..21b718831
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/case.py
@@ -0,0 +1,21 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import subprocess
+
+from oeqa.utils import avoid_paths_in_environ
+from oeqa.sdk.case import OESDKTestCase
+
+class OESDKExtTestCase(OESDKTestCase):
+ def _run(self, cmd):
+ # The extensible SDK shows a warning if bitbake is found in PATH,
+ # because it can cause contamination, e.g. using devtool from
+ # poky/scripts instead of the eSDK one.
+ env = os.environ.copy()
+ paths_to_avoid = ['bitbake/bin', 'poky/scripts']
+ env['PATH'] = avoid_paths_in_environ(paths_to_avoid)
+
+ return subprocess.check_output(". %s > /dev/null;"\
+ " %s;" % (self.tc.sdk_env, cmd), stderr=subprocess.STDOUT,
+ shell=True, env=env, universal_newlines=True)
diff --git a/poky/meta/lib/oeqa/sdkext/cases/devtool.py b/poky/meta/lib/oeqa/sdkext/cases/devtool.py
new file mode 100644
index 000000000..ea9051710
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/cases/devtool.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import shutil
+import subprocess
+
+from oeqa.sdkext.case import OESDKExtTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.utils.httpserver import HTTPService
+
+class DevtoolTest(OESDKExtTestCase):
+ @classmethod
+ def setUpClass(cls):
+ myapp_src = os.path.join(cls.tc.esdk_files_dir, "myapp")
+ cls.myapp_dst = os.path.join(cls.tc.sdk_dir, "myapp")
+ shutil.copytree(myapp_src, cls.myapp_dst)
+
+ myapp_cmake_src = os.path.join(cls.tc.esdk_files_dir, "myapp_cmake")
+ cls.myapp_cmake_dst = os.path.join(cls.tc.sdk_dir, "myapp_cmake")
+ shutil.copytree(myapp_cmake_src, cls.myapp_cmake_dst)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.myapp_dst)
+ shutil.rmtree(cls.myapp_cmake_dst)
+
+ def _test_devtool_build(self, directory):
+ self._run('devtool add myapp %s' % directory)
+ try:
+ self._run('devtool build myapp')
+ finally:
+ self._run('devtool reset myapp')
+
+ def _test_devtool_build_package(self, directory):
+ self._run('devtool add myapp %s' % directory)
+ try:
+ self._run('devtool package myapp')
+ finally:
+ self._run('devtool reset myapp')
+
+ def test_devtool_location(self):
+ output = self._run('which devtool')
+ self.assertTrue(output.startswith(self.tc.sdk_dir),
+ msg="Seems that devtool isn't the eSDK one: %s" % output)
+
+ @OETestDepends(['test_devtool_location'])
+ def test_devtool_add_reset(self):
+ self._run('devtool add myapp %s' % self.myapp_dst)
+ self._run('devtool reset myapp')
+
+ @OETestID(1605)
+ @OETestDepends(['test_devtool_location'])
+ def test_devtool_build_make(self):
+ self._test_devtool_build(self.myapp_dst)
+
+ @OETestID(1606)
+ @OETestDepends(['test_devtool_location'])
+ def test_devtool_build_esdk_package(self):
+ self._test_devtool_build_package(self.myapp_dst)
+
+ @OETestID(1607)
+ @OETestDepends(['test_devtool_location'])
+ def test_devtool_build_cmake(self):
+ self._test_devtool_build(self.myapp_cmake_dst)
+
+ @OETestID(1608)
+ @OETestDepends(['test_devtool_location'])
+ def test_extend_autotools_recipe_creation(self):
+ req = 'https://github.com/rdfa/librdfa'
+ recipe = "librdfa"
+ self._run('devtool sdk-install libxml2')
+ self._run('devtool add %s %s' % (recipe, req) )
+ try:
+ self._run('devtool build %s' % recipe)
+ finally:
+ self._run('devtool reset %s' % recipe)
+
+ @OETestID(1609)
+ @OETestDepends(['test_devtool_location'])
+ def test_devtool_kernelmodule(self):
+ docfile = 'https://github.com/umlaeute/v4l2loopback.git'
+ recipe = 'v4l2loopback-driver'
+ self._run('devtool add %s %s' % (recipe, docfile) )
+ try:
+ self._run('devtool build %s' % recipe)
+ finally:
+ self._run('devtool reset %s' % recipe)
+
+ @OETestID(1610)
+ @OETestDepends(['test_devtool_location'])
+ def test_recipes_for_nodejs(self):
+ package_nodejs = "npm://registry.npmjs.org;name=winston;version=2.2.0"
+ self._run('devtool add %s ' % package_nodejs)
+ try:
+ self._run('devtool build %s ' % package_nodejs)
+ finally:
+ self._run('devtool reset %s '% package_nodejs)
+
+class SdkUpdateTest(OESDKExtTestCase):
+ @classmethod
+ def setUpClass(self):
+ self.publish_dir = os.path.join(self.tc.sdk_dir, 'esdk_publish')
+ if os.path.exists(self.publish_dir):
+ shutil.rmtree(self.publish_dir)
+ os.mkdir(self.publish_dir)
+
+ base_tcname = "%s/%s" % (self.td.get("SDK_DEPLOY", ''),
+ self.td.get("TOOLCHAINEXT_OUTPUTNAME", ''))
+ tcname_new = "%s-new.sh" % base_tcname
+ if not os.path.exists(tcname_new):
+ tcname_new = "%s.sh" % base_tcname
+
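+ # Publish the eSDK installer so the update can be served to devtool over HTTP.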
+ cmd = 'oe-publish-sdk %s %s' % (tcname_new, self.publish_dir)
+ subprocess.check_output(cmd, shell=True)
+
+ self.http_service = HTTPService(self.publish_dir)
+ self.http_service.start()
+
+ self.http_url = "http://127.0.0.1:%d" % self.http_service.port
+
+ def test_sdk_update_http(self):
+ output = self._run("devtool sdk-update \"%s\"" % self.http_url)
+
+ @classmethod
+ def tearDownClass(self):
+ self.http_service.stop()
+ shutil.rmtree(self.publish_dir)
diff --git a/poky/meta/lib/oeqa/sdkext/context.py b/poky/meta/lib/oeqa/sdkext/context.py
new file mode 100644
index 000000000..65da4c6e1
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/context.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+from oeqa.sdk.context import OESDKTestContext, OESDKTestContextExecutor
+
+class OESDKExtTestContext(OESDKTestContext):
+ esdk_files_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
+
+ # FIXME - We really need to do better mapping of names here, this at
+ # least allows some tests to run
+ def hasHostPackage(self, pkg):
+ # We force a toolchain to be installed into the eSDK even if it's minimal
+ if pkg.startswith("packagegroup-cross-canadian-"):
+ return True
+ return self._hasPackage(self.host_pkg_manifest, pkg)
+
+class OESDKExtTestContextExecutor(OESDKTestContextExecutor):
+ _context_class = OESDKExtTestContext
+
+ name = 'esdk'
+ help = 'esdk test component'
+ description = 'executes esdk tests'
+
+ default_cases = OESDKTestContextExecutor.default_cases + \
+ [os.path.join(os.path.abspath(os.path.dirname(__file__)), 'cases')]
+ default_test_data = None
+
+_executor_class = OESDKExtTestContextExecutor
diff --git a/poky/meta/lib/oeqa/sdkext/files/myapp/Makefile b/poky/meta/lib/oeqa/sdkext/files/myapp/Makefile
new file mode 100644
index 000000000..abd91bea6
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/files/myapp/Makefile
@@ -0,0 +1,10 @@
+all: myapp
+
+myapp: myapp.o
+ $(CC) $(LDFLAGS) $< -o $@
+
+myapp.o: myapp.c
+ $(CC) $(CFLAGS) -c $< -o $@
+
+clean:
+ rm -rf myapp.o myapp
diff --git a/poky/meta/lib/oeqa/sdkext/files/myapp/myapp.c b/poky/meta/lib/oeqa/sdkext/files/myapp/myapp.c
new file mode 100644
index 000000000..f0b63f03f
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/files/myapp/myapp.c
@@ -0,0 +1,9 @@
+#include <stdio.h>
+
+int
+main(int argc, char *argv[])
+{
+ printf("Hello world\n");
+
+ return 0;
+}
diff --git a/poky/meta/lib/oeqa/sdkext/files/myapp_cmake/CMakeLists.txt b/poky/meta/lib/oeqa/sdkext/files/myapp_cmake/CMakeLists.txt
new file mode 100644
index 000000000..19d773dd6
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/files/myapp_cmake/CMakeLists.txt
@@ -0,0 +1,11 @@
+cmake_minimum_required (VERSION 2.6)
+project (myapp)
+# The version number.
+set (myapp_VERSION_MAJOR 1)
+set (myapp_VERSION_MINOR 0)
+
+# add the executable
+add_executable (myapp myapp.c)
+
+install(TARGETS myapp
+ RUNTIME DESTINATION bin)
diff --git a/poky/meta/lib/oeqa/sdkext/files/myapp_cmake/myapp.c b/poky/meta/lib/oeqa/sdkext/files/myapp_cmake/myapp.c
new file mode 100644
index 000000000..f0b63f03f
--- /dev/null
+++ b/poky/meta/lib/oeqa/sdkext/files/myapp_cmake/myapp.c
@@ -0,0 +1,9 @@
+#include <stdio.h>
+
+int
+main(int argc, char *argv[])
+{
+ printf("Hello world\n");
+
+ return 0;
+}
diff --git a/poky/meta/lib/oeqa/selftest/case.py b/poky/meta/lib/oeqa/selftest/case.py
new file mode 100644
index 000000000..e09915b49
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/case.py
@@ -0,0 +1,278 @@
+# Copyright (C) 2013-2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import sys
+import os
+import shutil
+import glob
+import errno
+from unittest.util import safe_repr
+
+import oeqa.utils.ftools as ftools
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+from oeqa.core.case import OETestCase
+
+class OESelftestTestCase(OETestCase):
+ def __init__(self, methodName="runTest"):
+ self._extra_tear_down_commands = []
+ super(OESelftestTestCase, self).__init__(methodName)
+
+ @classmethod
+ def setUpClass(cls):
+ super(OESelftestTestCase, cls).setUpClass()
+
+ cls.testlayer_path = cls.tc.config_paths['testlayer_path']
+ cls.builddir = cls.tc.config_paths['builddir']
+
+ cls.localconf_path = cls.tc.config_paths['localconf']
+ cls.localconf_backup = cls.tc.config_paths['localconf_class_backup']
+ cls.local_bblayers_path = cls.tc.config_paths['bblayers']
+ cls.local_bblayers_backup = cls.tc.config_paths['bblayers_class_backup']
+
+ cls.testinc_path = os.path.join(cls.tc.config_paths['builddir'],
+ "conf/selftest.inc")
+ cls.testinc_bblayers_path = os.path.join(cls.tc.config_paths['builddir'],
+ "conf/bblayers.inc")
+ cls.machineinc_path = os.path.join(cls.tc.config_paths['builddir'],
+ "conf/machine.inc")
+
+ cls._track_for_cleanup = [
+ cls.testinc_path, cls.testinc_bblayers_path,
+ cls.machineinc_path, cls.localconf_backup,
+ cls.local_bblayers_backup]
+
+ cls.add_include()
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.remove_include()
+ cls.remove_inc_files()
+ super(OESelftestTestCase, cls).tearDownClass()
+
+ @classmethod
+ def add_include(cls):
+ if "#include added by oe-selftest" \
+ not in ftools.read_file(os.path.join(cls.builddir, "conf/local.conf")):
+ cls.logger.info("Adding: \"include selftest.inc\" in %s" % os.path.join(cls.builddir, "conf/local.conf"))
+ ftools.append_file(os.path.join(cls.builddir, "conf/local.conf"), \
+ "\n#include added by oe-selftest\ninclude machine.inc\ninclude selftest.inc")
+
+ if "#include added by oe-selftest" \
+ not in ftools.read_file(os.path.join(cls.builddir, "conf/bblayers.conf")):
+ cls.logger.info("Adding: \"include bblayers.inc\" in bblayers.conf")
+ ftools.append_file(os.path.join(cls.builddir, "conf/bblayers.conf"), \
+ "\n#include added by oe-selftest\ninclude bblayers.inc")
+
+ @classmethod
+ def remove_include(cls):
+ if "#include added by oe-selftest.py" \
+ in ftools.read_file(os.path.join(cls.builddir, "conf/local.conf")):
+ cls.logger.info("Removing the include from local.conf")
+ ftools.remove_from_file(os.path.join(cls.builddir, "conf/local.conf"), \
+ "\n#include added by oe-selftest.py\ninclude machine.inc\ninclude selftest.inc")
+
+ if "#include added by oe-selftest.py" \
+ in ftools.read_file(os.path.join(cls.builddir, "conf/bblayers.conf")):
+ cls.logger.info("Removing the include from bblayers.conf")
+ ftools.remove_from_file(os.path.join(cls.builddir, "conf/bblayers.conf"), \
+ "\n#include added by oe-selftest.py\ninclude bblayers.inc")
+
+ @classmethod
+ def remove_inc_files(cls):
+ try:
+ os.remove(os.path.join(cls.builddir, "conf/selftest.inc"))
+ for root, _, files in os.walk(cls.testlayer_path):
+ for f in files:
+ if f == 'test_recipe.inc':
+ os.remove(os.path.join(root, f))
+ except OSError:
+ pass
+
+ for incl_file in ['conf/bblayers.inc', 'conf/machine.inc']:
+ try:
+ os.remove(os.path.join(cls.builddir, incl_file))
+ except OSError:
+ pass
+
+ def setUp(self):
+ super(OESelftestTestCase, self).setUp()
+ os.chdir(self.builddir)
+ # Check if local.conf or bblayers.conf files backup exists
+ # from a previous failed test and restore them
+ if os.path.isfile(self.localconf_backup) or os.path.isfile(
+ self.local_bblayers_backup):
+ self.logger.debug("\
+Found a local.conf and/or bblayers.conf backup from a previously aborted test.\
+Restoring these files now, but tests should be re-executed from a clean environment\
+to ensure accurate results.")
+ try:
+ shutil.copyfile(self.localconf_backup, self.localconf_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ try:
+ shutil.copyfile(self.local_bblayers_backup,
+ self.local_bblayers_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ # backup local.conf and bblayers.conf
+ shutil.copyfile(self.localconf_path, self.localconf_backup)
+ shutil.copyfile(self.local_bblayers_path, self.local_bblayers_backup)
+ self.logger.debug("Creating local.conf and bblayers.conf backups.")
+ # we don't know what the previous test left around in config or inc files
+ # if it failed so we need a fresh start
+ try:
+ os.remove(self.testinc_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ for root, _, files in os.walk(self.testlayer_path):
+ for f in files:
+ if f == 'test_recipe.inc':
+ os.remove(os.path.join(root, f))
+
+ for incl_file in [self.testinc_bblayers_path, self.machineinc_path]:
+ try:
+ os.remove(incl_file)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ if self.tc.custommachine:
+ machine_conf = 'MACHINE ??= "%s"\n' % self.tc.custommachine
+ self.set_machine_config(machine_conf)
+
+ # tests might need their own setup
+ # but if they overwrite this one they have to call
+ # super each time, so let's give them an alternative
+ self.setUpLocal()
+
+ def setUpLocal(self):
+ pass
+
+ def tearDown(self):
+ if self._extra_tear_down_commands:
+ failed_extra_commands = []
+ for command in self._extra_tear_down_commands:
+ result = runCmd(command, ignore_status=True)
+ if not result.status == 0:
+ failed_extra_commands.append(command)
+ if failed_extra_commands:
+ self.logger.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
+ self.logger.debug("Trying to move on.")
+ self._extra_tear_down_commands = []
+
+ if self._track_for_cleanup:
+ for path in self._track_for_cleanup:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ if os.path.isfile(path):
+ os.remove(path)
+ self._track_for_cleanup = []
+
+ self.tearDownLocal()
+ super(OESelftestTestCase, self).tearDown()
+
+ def tearDownLocal(self):
+ pass
+
+ def add_command_to_tearDown(self, command):
+ """Add test specific commands to the tearDown method"""
+ self.logger.debug("Adding command '%s' to tearDown for this test." % command)
+ self._extra_tear_down_commands.append(command)
+
+ def track_for_cleanup(self, path):
+ """Add test specific files or directories to be removed in the tearDown method"""
+ self.logger.debug("Adding path '%s' to be cleaned up when test is over" % path)
+ self._track_for_cleanup.append(path)
+
+ def write_config(self, data):
+ """Write to <builddir>/conf/selftest.inc"""
+
+ self.logger.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
+ ftools.write_file(self.testinc_path, data)
+
+ if self.tc.custommachine and 'MACHINE' in data:
+ machine = get_bb_var('MACHINE')
+ self.logger.warning('MACHINE overridden: %s' % machine)
+
+ def append_config(self, data):
+ """Append to <builddir>/conf/selftest.inc"""
+ self.logger.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
+ ftools.append_file(self.testinc_path, data)
+
+ if self.tc.custommachine and 'MACHINE' in data:
+ machine = get_bb_var('MACHINE')
+ self.logger.warning('MACHINE overridden: %s' % machine)
+
+ def remove_config(self, data):
+ """Remove data from <builddir>/conf/selftest.inc"""
+ self.logger.debug("Removing from: %s\n%s\n" % (self.testinc_path, data))
+ ftools.remove_from_file(self.testinc_path, data)
+
+ def recipeinc(self, recipe):
+ """Return absolute path of meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ return os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
+
+ def write_recipeinc(self, recipe, data):
+ """Write to meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Writing to: %s\n%s\n" % (inc_file, data))
+ ftools.write_file(inc_file, data)
+ return inc_file
+
+ def append_recipeinc(self, recipe, data):
+ """Append data to meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Appending to: %s\n%s\n" % (inc_file, data))
+ ftools.append_file(inc_file, data)
+ return inc_file
+
+ def remove_recipeinc(self, recipe, data):
+ """Remove data from meta-sefltest/recipes-test/<recipe>/test_recipe.inc"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Removing from: %s\n%s\n" % (inc_file, data))
+ ftools.remove_from_file(inc_file, data)
+
+ def delete_recipeinc(self, recipe):
+ """Delete meta-sefltest/recipes-test/<recipe>/test_recipe.inc file"""
+ inc_file = self.recipeinc(recipe)
+ self.logger.debug("Deleting file: %s" % inc_file)
+ try:
+ os.remove(inc_file)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def write_bblayers_config(self, data):
+ """Write to <builddir>/conf/bblayers.inc"""
+ self.logger.debug("Writing to: %s\n%s\n" % (self.testinc_bblayers_path, data))
+ ftools.write_file(self.testinc_bblayers_path, data)
+
+ def append_bblayers_config(self, data):
+ """Append to <builddir>/conf/bblayers.inc"""
+ self.logger.debug("Appending to: %s\n%s\n" % (self.testinc_bblayers_path, data))
+ ftools.append_file(self.testinc_bblayers_path, data)
+
+ def remove_bblayers_config(self, data):
+ """Remove data from <builddir>/conf/bblayers.inc"""
+ self.logger.debug("Removing from: %s\n%s\n" % (self.testinc_bblayers_path, data))
+ ftools.remove_from_file(self.testinc_bblayers_path, data)
+
+ def set_machine_config(self, data):
+ """Write to <builddir>/conf/machine.inc"""
+ self.logger.debug("Writing to: %s\n%s\n" % (self.machineinc_path, data))
+ ftools.write_file(self.machineinc_path, data)
+
+ # Check that a path exists
+ def assertExists(self, expr, msg=None):
+ if not os.path.exists(expr):
+ msg = self._formatMessage(msg, "%s does not exist" % safe_repr(expr))
+ raise self.failureException(msg)
+
+ # Check that a path does not exist
+ def assertNotExists(self, expr, msg=None):
+ if os.path.exists(expr):
+ msg = self._formatMessage(msg, "%s exists when it should not" % safe_repr(expr))
+ raise self.failureException(msg)
diff --git a/poky/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py b/poky/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py
new file mode 100644
index 000000000..0e5896234
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/_sstatetests_noauto.py
@@ -0,0 +1,92 @@
+import os
+import shutil
+
+import oeqa.utils.ftools as ftools
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
+from oeqa.selftest.cases.sstate import SStateBase
+
+
+class RebuildFromSState(SStateBase):
+
+ @classmethod
+ def setUpClass(self):
+ super(RebuildFromSState, self).setUpClass()
+ self.builddir = os.path.join(os.environ.get('BUILDDIR'))
+
+ def get_dep_targets(self, primary_targets):
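+ # 'bitbake -g' writes the computed dependency list to pn-buildlist in the build directory.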
+ found_targets = []
+ bitbake("-g " + ' '.join(map(str, primary_targets)))
+ with open(os.path.join(self.builddir, 'pn-buildlist'), 'r') as pnfile:
+ found_targets = pnfile.read().splitlines()
+ return found_targets
+
+ def configure_builddir(self, builddir):
+ os.mkdir(builddir)
+ self.track_for_cleanup(builddir)
+ os.mkdir(os.path.join(builddir, 'conf'))
+ shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/local.conf'), os.path.join(builddir, 'conf/local.conf'))
+ config = {}
+ config['default_sstate_dir'] = "SSTATE_DIR ?= \"${TOPDIR}/sstate-cache\""
+ config['null_sstate_mirrors'] = "SSTATE_MIRRORS = \"\""
+ config['default_tmp_dir'] = "TMPDIR = \"${TOPDIR}/tmp\""
+ for key in config:
+ ftools.append_file(os.path.join(builddir, 'conf/selftest.inc'), config[key])
+ shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/bblayers.conf'), os.path.join(builddir, 'conf/bblayers.conf'))
+ try:
+ shutil.copyfile(os.path.join(os.environ.get('BUILDDIR'), 'conf/auto.conf'), os.path.join(builddir, 'conf/auto.conf'))
+ except OSError:
+ # auto.conf is optional and may not exist
+ pass
+
+ def hardlink_tree(self, src, dst):
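+ # Mirror the sstate tree at dst using hard links so each rebuild starts
+ # from a pristine cache without copying files.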
+ os.mkdir(dst)
+ self.track_for_cleanup(dst)
+ for root, dirs, files in os.walk(src):
+ if root == src:
+ continue
+ os.mkdir(os.path.join(dst, root.split(src)[1][1:]))
+ for sstate_file in files:
+ os.link(os.path.join(root, sstate_file), os.path.join(dst, root.split(src)[1][1:], sstate_file))
+
+ def run_test_sstate_rebuild(self, primary_targets, relocate=False, rebuild_dependencies=False):
+ buildA = os.path.join(self.builddir, 'buildA')
+ if relocate:
+ buildB = os.path.join(self.builddir, 'buildB')
+ else:
+ buildB = buildA
+
+ if rebuild_dependencies:
+ rebuild_targets = self.get_dep_targets(primary_targets)
+ else:
+ rebuild_targets = primary_targets
+
+ self.configure_builddir(buildA)
+ runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildA)) + 'bitbake ' + ' '.join(map(str, primary_targets)), shell=True, executable='/bin/bash')
+ self.hardlink_tree(os.path.join(buildA, 'sstate-cache'), os.path.join(self.builddir, 'sstate-cache-buildA'))
+ shutil.rmtree(buildA)
+
+ failed_rebuild = []
+ failed_cleansstate = []
+ for target in rebuild_targets:
+ self.configure_builddir(buildB)
+ self.hardlink_tree(os.path.join(self.builddir, 'sstate-cache-buildA'), os.path.join(buildB, 'sstate-cache'))
+
+ result_cleansstate = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake -ccleansstate ' + target, ignore_status=True, shell=True, executable='/bin/bash')
+ if not result_cleansstate.status == 0:
+ failed_cleansstate.append(target)
+ shutil.rmtree(buildB)
+ continue
+
+ result_build = runCmd((". %s/oe-init-build-env %s && " % (get_bb_var('COREBASE'), buildB)) + 'bitbake ' + target, ignore_status=True, shell=True, executable='/bin/bash')
+ if not result_build.status == 0:
+ failed_rebuild.append(target)
+
+ shutil.rmtree(buildB)
+
+ self.assertFalse(failed_rebuild, msg="The following recipes have failed to rebuild: %s" % ' '.join(map(str, failed_rebuild)))
+ self.assertFalse(failed_cleansstate, msg="The following recipes have failed cleansstate (all others passed both the cleansstate and rebuild-from-sstate tests): %s" % ' '.join(map(str, failed_cleansstate)))
+
+ def test_sstate_relocation(self):
+ self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=True, rebuild_dependencies=True)
+
+ def test_sstate_rebuild(self):
+ self.run_test_sstate_rebuild(['core-image-sato-sdk'], relocate=False, rebuild_dependencies=True)
diff --git a/poky/meta/lib/oeqa/selftest/cases/archiver.py b/poky/meta/lib/oeqa/selftest/cases/archiver.py
new file mode 100644
index 000000000..0a6d4e325
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/archiver.py
@@ -0,0 +1,131 @@
+import os
+import glob
+from oeqa.utils.commands import bitbake, get_bb_vars
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.core.decorator.oeid import OETestID
+
+class Archiver(OESelftestTestCase):
+
+ @OETestID(1345)
+ def test_archiver_allows_to_filter_on_recipe_name(self):
+ """
+ Summary: The archiver should offer the possibility to filter on the recipe. (#6929)
+ Expected: 1. Included recipe (busybox) should be included
+ 2. Excluded recipe (zlib) should be excluded
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ include_recipe = 'busybox'
+ exclude_recipe = 'zlib'
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "original"\n'
+ features += 'COPYLEFT_PN_INCLUDE = "%s"\n' % include_recipe
+ features += 'COPYLEFT_PN_EXCLUDE = "%s"\n' % exclude_recipe
+ self.write_config(features)
+
+ bitbake('-c clean %s %s' % (include_recipe, exclude_recipe))
+ bitbake("-c deploy_archives %s %s" % (include_recipe, exclude_recipe))
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS'])
+ src_path = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
+
+ # Check that include_recipe was included
+ included_present = len(glob.glob(src_path + '/%s-*' % include_recipe))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % include_recipe)
+
+ # Check that exclude_recipe was excluded
+ excluded_present = len(glob.glob(src_path + '/%s-*' % exclude_recipe))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % exclude_recipe)
+
+ @OETestID(1900)
+ def test_archiver_filters_by_type(self):
+ """
+ Summary: The archiver is documented to filter on the recipe type.
+ Expected: 1. included recipe type (target) should be included
+ 2. other types should be excluded
+ Product: oe-core
+ Author: André Draszik <adraszik@tycoint.com>
+ """
+
+ target_recipe = 'initscripts'
+ native_recipe = 'zlib-native'
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "original"\n'
+ features += 'COPYLEFT_RECIPE_TYPES = "target"\n'
+ self.write_config(features)
+
+ bitbake('-c clean %s %s' % (target_recipe, native_recipe))
+ bitbake("%s -c deploy_archives %s" % (target_recipe, native_recipe))
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS', 'BUILD_SYS'])
+ src_path_target = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
+ src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
+
+ # Check that target_recipe was included
+ included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipe))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipe)
+
+ # Check that native_recipe was excluded
+ excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipe))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipe)
+
+ @OETestID(1901)
+ def test_archiver_filters_by_type_and_name(self):
+ """
+ Summary: Test that the archiver archives by recipe type, taking the
+ recipe name into account.
+ Expected: 1. included recipe type (target) should be included
+ 2. other types should be excluded
+ 3. recipe by name should be included / excluded,
+ overriding previous decision by type
+ Product: oe-core
+ Author: André Draszik <adraszik@tycoint.com>
+ """
+
+ target_recipes = [ 'initscripts', 'zlib' ]
+ native_recipes = [ 'update-rc.d-native', 'zlib-native' ]
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[src] = "original"\n'
+ features += 'COPYLEFT_RECIPE_TYPES = "target"\n'
+ features += 'COPYLEFT_PN_INCLUDE = "%s"\n' % native_recipes[1]
+ features += 'COPYLEFT_PN_EXCLUDE = "%s"\n' % target_recipes[1]
+ self.write_config(features)
+
+ bitbake('-c clean %s %s' % (' '.join(target_recipes), ' '.join(native_recipes)))
+ bitbake('-c deploy_archives %s %s' % (' '.join(target_recipes), ' '.join(native_recipes)))
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_SRC', 'TARGET_SYS', 'BUILD_SYS'])
+ src_path_target = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['TARGET_SYS'])
+ src_path_native = os.path.join(bb_vars['DEPLOY_DIR_SRC'], bb_vars['BUILD_SYS'])
+
+ # Check that target_recipe[0] and native_recipes[1] were included
+ included_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[0]))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % target_recipes[0])
+
+ included_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[1]))
+ self.assertTrue(included_present, 'Recipe %s was not included.' % native_recipes[1])
+
+ # Check that native_recipes[0] and target_recipes[1] were excluded
+ excluded_present = len(glob.glob(src_path_native + '/%s-*' % native_recipes[0]))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % native_recipes[0])
+
+ excluded_present = len(glob.glob(src_path_target + '/%s-*' % target_recipes[1]))
+ self.assertFalse(excluded_present, 'Recipe %s was not excluded.' % target_recipes[1])
+
+ def test_archiver_srpm_mode(self):
+ """
+ Test that in srpm mode, the added recipe dependencies at least exist/work [YOCTO #11121]
+ """
+
+ features = 'INHERIT += "archiver"\n'
+ features += 'ARCHIVER_MODE[srpm] = "1"\n'
+ self.write_config(features)
+
+ bitbake('-n core-image-sato')
diff --git a/poky/meta/lib/oeqa/selftest/cases/bblayers.py b/poky/meta/lib/oeqa/selftest/cases/bblayers.py
new file mode 100644
index 000000000..90a2249b0
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/bblayers.py
@@ -0,0 +1,97 @@
+import os
+import re
+
+import oeqa.utils.ftools as ftools
+from oeqa.utils.commands import runCmd, get_bb_var
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.core.decorator.oeid import OETestID
+
+class BitbakeLayers(OESelftestTestCase):
+
+ @OETestID(756)
+ def test_bitbakelayers_showcrossdepends(self):
+ result = runCmd('bitbake-layers show-cross-depends')
+ self.assertTrue('aspell' in result.output, msg = "No dependencies were shown. bitbake-layers show-cross-depends output: %s" % result.output)
+
+ @OETestID(83)
+ def test_bitbakelayers_showlayers(self):
+ result = runCmd('bitbake-layers show-layers')
+ self.assertTrue('meta-selftest' in result.output, msg = "No layers were shown. bitbake-layers show-layers output: %s" % result.output)
+
+ @OETestID(93)
+ def test_bitbakelayers_showappends(self):
+ recipe = "xcursor-transparent-theme"
+ bb_file = self.get_recipe_basename(recipe)
+ result = runCmd('bitbake-layers show-appends')
+ self.assertTrue(bb_file in result.output, msg="%s file was not recognised. bitbake-layers show-appends output: %s" % (bb_file, result.output))
+
+ @OETestID(90)
+ def test_bitbakelayers_showoverlayed(self):
+ result = runCmd('bitbake-layers show-overlayed')
+ self.assertTrue('aspell' in result.output, msg="aspell overlayed recipe was not recognised. bitbake-layers show-overlayed output: %s" % result.output)
+
+ @OETestID(95)
+ def test_bitbakelayers_flatten(self):
+ recipe = "xcursor-transparent-theme"
+ recipe_path = "recipes-graphics/xcursor-transparent-theme"
+ recipe_file = self.get_recipe_basename(recipe)
+ testoutdir = os.path.join(self.builddir, 'test_bitbakelayers_flatten')
+ self.assertFalse(os.path.isdir(testoutdir), msg = "test_bitbakelayers_flatten should not exist at this point in time")
+ self.track_for_cleanup(testoutdir)
+ result = runCmd('bitbake-layers flatten %s' % testoutdir)
+ bb_file = os.path.join(testoutdir, recipe_path, recipe_file)
+ self.assertTrue(os.path.isfile(bb_file), msg = "Cannot find xcursor-transparent-theme_0.1.1.bb in the test_bitbakelayers_flatten local dir.")
+ contents = ftools.read_file(bb_file)
+ find_in_contents = re.search("##### bbappended from meta-selftest #####\n(.*\n)*include test_recipe.inc", contents)
+ self.assertTrue(find_in_contents, msg = "Flattening layers did not work. bitbake-layers flatten output: %s" % result.output)
+
+ @OETestID(1195)
+ def test_bitbakelayers_add_remove(self):
+ test_layer = os.path.join(get_bb_var('COREBASE'), 'meta-skeleton')
+ result = runCmd('bitbake-layers show-layers')
+ self.assertNotIn('meta-skeleton', result.output, "This test cannot run with meta-skeleton in bblayers.conf. bitbake-layers show-layers output: %s" % result.output)
+ result = runCmd('bitbake-layers add-layer %s' % test_layer)
+ result = runCmd('bitbake-layers show-layers')
+ self.assertIn('meta-skeleton', result.output, msg = "Something went wrong: meta-skeleton layer was not added to conf/bblayers.conf. bitbake-layers show-layers output: %s" % result.output)
+ result = runCmd('bitbake-layers remove-layer %s' % test_layer)
+ result = runCmd('bitbake-layers show-layers')
+ self.assertNotIn('meta-skeleton', result.output, msg = "meta-skeleton should have been removed at this step. bitbake-layers show-layers output: %s" % result.output)
+ result = runCmd('bitbake-layers add-layer %s' % test_layer)
+ result = runCmd('bitbake-layers show-layers')
+ self.assertIn('meta-skeleton', result.output, msg = "Something went wrong: meta-skeleton layer was not added to conf/bblayers.conf. bitbake-layers show-layers output: %s" % result.output)
+ result = runCmd('bitbake-layers remove-layer */meta-skeleton')
+ result = runCmd('bitbake-layers show-layers')
+ self.assertNotIn('meta-skeleton', result.output, msg = "meta-skeleton should have been removed at this step. bitbake-layers show-layers output: %s" % result.output)
+
+ @OETestID(1384)
+ def test_bitbakelayers_showrecipes(self):
+ result = runCmd('bitbake-layers show-recipes')
+ self.assertIn('aspell:', result.output)
+ self.assertIn('mtd-utils:', result.output)
+ self.assertIn('core-image-minimal:', result.output)
+ result = runCmd('bitbake-layers show-recipes mtd-utils')
+ self.assertIn('mtd-utils:', result.output)
+ self.assertNotIn('aspell:', result.output)
+ result = runCmd('bitbake-layers show-recipes -i image')
+ self.assertIn('core-image-minimal', result.output)
+ self.assertNotIn('mtd-utils:', result.output)
+ result = runCmd('bitbake-layers show-recipes -i cmake,pkgconfig')
+ self.assertIn('libproxy:', result.output)
+ self.assertNotIn('mtd-utils:', result.output) # doesn't inherit either
+ self.assertNotIn('wget:', result.output) # doesn't inherit cmake
+ self.assertNotIn('waffle:', result.output) # doesn't inherit pkgconfig
+ result = runCmd('bitbake-layers show-recipes -i nonexistentclass', ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'bitbake-layers show-recipes -i nonexistentclass should have failed')
+ self.assertIn('ERROR:', result.output)
+
+ def get_recipe_basename(self, recipe):
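+ # Query 'bitbake-layers show-recipes -f' for the recipe's full path and return just the .bb filename.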
+ recipe_file = ""
+ result = runCmd("bitbake-layers show-recipes -f %s" % recipe)
+ for line in result.output.splitlines():
+ if recipe in line:
+ recipe_file = line
+ break
+
+ self.assertTrue(os.path.isfile(recipe_file), msg = "Can't find recipe file for %s" % recipe)
+ return os.path.basename(recipe_file)
diff --git a/poky/meta/lib/oeqa/selftest/cases/bbtests.py b/poky/meta/lib/oeqa/selftest/cases/bbtests.py
new file mode 100644
index 000000000..350614967
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/bbtests.py
@@ -0,0 +1,278 @@
+import os
+import re
+
+import oeqa.utils.ftools as ftools
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.core.decorator.oeid import OETestID
+
+class BitbakeTests(OESelftestTestCase):
+
+ def getline(self, res, line):
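+ # Return the first line of the command output that contains the given substring (None if absent).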
+ for l in res.output.split('\n'):
+ if line in l:
+ return l
+
+ @OETestID(789)
+ def test_run_bitbake_from_dir_1(self):
+ os.chdir(os.path.join(self.builddir, 'conf'))
+ self.assertEqual(bitbake('-e').status, 0, msg = "bitbake couldn't run from \"conf\" dir")
+
+ @OETestID(790)
+ def test_run_bitbake_from_dir_2(self):
+ my_env = os.environ.copy()
+ my_env['BBPATH'] = my_env['BUILDDIR']
+ os.chdir(os.path.dirname(os.environ['BUILDDIR']))
+ self.assertEqual(bitbake('-e', env=my_env).status, 0, msg = "bitbake couldn't run from builddir")
+
+ @OETestID(806)
+ def test_event_handler(self):
+ self.write_config("INHERIT += \"test_events\"")
+ result = bitbake('m4-native')
+ find_build_started = re.search(r"NOTE: Test for bb\.event\.BuildStarted(\n.*)*NOTE: Executing RunQueue Tasks", result.output)
+ find_build_completed = re.search(r"Tasks Summary:.*(\n.*)*NOTE: Test for bb\.event\.BuildCompleted", result.output)
+ self.assertTrue(find_build_started, msg = "Match failed in:\n%s" % result.output)
+ self.assertTrue(find_build_completed, msg = "Match failed in:\n%s" % result.output)
+ self.assertFalse('Test for bb.event.InvalidEvent' in result.output, msg = "\"Test for bb.event.InvalidEvent\" message found during bitbake process. bitbake output: %s" % result.output)
+
+ @OETestID(103)
+ def test_local_sstate(self):
+ bitbake('m4-native')
+ bitbake('m4-native -cclean')
+ result = bitbake('m4-native')
+ find_setscene = re.search("m4-native.*do_.*_setscene", result.output)
+ self.assertTrue(find_setscene, msg = "No \"m4-native.*do_.*_setscene\" message found during bitbake m4-native. bitbake output: %s" % result.output )
+
+ @OETestID(105)
+ def test_bitbake_invalid_recipe(self):
+ result = bitbake('-b asdf', ignore_status=True)
+ self.assertTrue("ERROR: Unable to find any recipe file matching 'asdf'" in result.output, msg = "Though asdf recipe doesn't exist, bitbake didn't output any err. message. bitbake output: %s" % result.output)
+
+ @OETestID(107)
+ def test_bitbake_invalid_target(self):
+ result = bitbake('asdf', ignore_status=True)
+ self.assertTrue("ERROR: Nothing PROVIDES 'asdf'" in result.output, msg = "Though no 'asdf' target exists, bitbake didn't output any err. message. bitbake output: %s" % result.output)
+
+ @OETestID(106)
+ def test_warnings_errors(self):
+ result = bitbake('-b asdf', ignore_status=True)
+ find_warnings = re.search("Summary: There w.{2,3}? [1-9][0-9]* WARNING messages* shown", result.output)
+ find_errors = re.search("Summary: There w.{2,3}? [1-9][0-9]* ERROR messages* shown", result.output)
+ self.assertTrue(find_warnings, msg="Did not find the number of warnings at the end of the build:\n" + result.output)
+ self.assertTrue(find_errors, msg="Did not find the number of errors at the end of the build:\n" + result.output)
+
+ @OETestID(108)
+ def test_invalid_patch(self):
+ # This patch should fail to apply.
+ self.write_recipeinc('man-db', 'FILESEXTRAPATHS_prepend := "${THISDIR}/files:"\nSRC_URI += "file://0001-Test-patch-here.patch"')
+ self.write_config("INHERIT_remove = \"report-error\"")
+ result = bitbake('man-db -c patch', ignore_status=True)
+ self.delete_recipeinc('man-db')
+ bitbake('-cclean man-db')
+ line = self.getline(result, "Function failed: patch_do_patch")
+ self.assertTrue(line and line.startswith("ERROR:"), msg = "Incorrectly formed patch application didn't fail. bitbake output: %s" % result.output)
+
+ @OETestID(1354)
+ def test_force_task_1(self):
+ # test 1 from bug 5875
+ test_recipe = 'zlib'
+ test_data = "Microsoft Made No Profit From Anyone's Zunes Yo"
+ bb_vars = get_bb_vars(['D', 'PKGDEST', 'mandir'], test_recipe)
+ image_dir = bb_vars['D']
+ pkgsplit_dir = bb_vars['PKGDEST']
+ man_dir = bb_vars['mandir']
+
+ bitbake('-c clean %s' % test_recipe)
+ bitbake('-c package -f %s' % test_recipe)
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+
+ man_file = os.path.join(image_dir + man_dir, 'man3/zlib.3')
+ ftools.append_file(man_file, test_data)
+ bitbake('-c package -f %s' % test_recipe)
+
+ man_split_file = os.path.join(pkgsplit_dir, 'zlib-doc' + man_dir, 'man3/zlib.3')
+ man_split_content = ftools.read_file(man_split_file)
+ self.assertIn(test_data, man_split_content, 'The man file has not changed in packages-split.')
+
+ ret = bitbake(test_recipe)
+ self.assertIn('task do_package_write_rpm:', ret.output, 'Task do_package_write_rpm was not re-executed.')
+
+ @OETestID(163)
+ def test_force_task_2(self):
+ # test 2 from bug 5875
+ test_recipe = 'zlib'
+
+ bitbake(test_recipe)
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+
+ result = bitbake('-C compile %s' % test_recipe)
+ look_for_tasks = ['do_compile:', 'do_install:', 'do_populate_sysroot:', 'do_package:']
+ for task in look_for_tasks:
+ self.assertIn(task, result.output, msg="Couldn't find %s task." % task)
+
+ @OETestID(167)
+ def test_bitbake_g(self):
+ result = bitbake('-g core-image-minimal')
+ for f in ['pn-buildlist', 'recipe-depends.dot', 'task-depends.dot']:
+ self.addCleanup(os.remove, f)
+ self.assertTrue('Task dependencies saved to \'task-depends.dot\'' in result.output, msg = "No task dependency \"task-depends.dot\" file was generated for the given task target. bitbake output: %s" % result.output)
+ self.assertTrue('busybox' in ftools.read_file(os.path.join(self.builddir, 'task-depends.dot')), msg = "No \"busybox\" dependency found in task-depends.dot file.")
+
+ @OETestID(899)
+ def test_image_manifest(self):
+ bitbake('core-image-minimal')
+ bb_vars = get_bb_vars(["DEPLOY_DIR_IMAGE", "IMAGE_LINK_NAME"], "core-image-minimal")
+ deploydir = bb_vars["DEPLOY_DIR_IMAGE"]
+ imagename = bb_vars["IMAGE_LINK_NAME"]
+ manifest = os.path.join(deploydir, imagename + ".manifest")
+ self.assertTrue(os.path.islink(manifest), msg="No manifest file created for image. It should have been created in %s" % manifest)
+
+ @OETestID(168)
+ def test_invalid_recipe_src_uri(self):
+ data = 'SRC_URI = "file://invalid"'
+ self.write_recipeinc('man-db', data)
+ self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
+SSTATE_DIR = \"${TOPDIR}/download-selftest\"
+INHERIT_remove = \"report-error\"
+""")
+ self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
+
+ bitbake('-ccleanall man-db')
+ result = bitbake('-c fetch man-db', ignore_status=True)
+ bitbake('-ccleanall man-db')
+ self.delete_recipeinc('man-db')
+ self.assertEqual(result.status, 1, msg="Command succeeded when it should have failed. bitbake output: %s" % result.output)
+ self.assertTrue('Fetcher failure: Unable to find file file://invalid anywhere. The paths that were searched were:' in result.output, msg = "\"invalid\" file \
+doesn't exist, yet no error message encountered. bitbake output: %s" % result.output)
+ line = self.getline(result, 'Fetcher failure for URL: \'file://invalid\'. Unable to fetch URL from any source.')
+ self.assertTrue(line and line.startswith("ERROR:"), msg = "\"invalid\" file \
+doesn't exist, yet fetcher didn't report any error. bitbake output: %s" % result.output)
+
+ @OETestID(171)
+ def test_rename_downloaded_file(self):
+ # TODO unique dldir instead of using cleanall
+ # TODO: need to set sstatedir?
+ self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
+SSTATE_DIR = \"${TOPDIR}/download-selftest\"
+""")
+ self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
+
+ data = 'SRC_URI = "${GNU_MIRROR}/aspell/aspell-${PV}.tar.gz;downloadfilename=test-aspell.tar.gz"'
+ self.write_recipeinc('aspell', data)
+ result = bitbake('-f -c fetch aspell', ignore_status=True)
+ self.delete_recipeinc('aspell')
+ self.assertEqual(result.status, 0, msg = "Couldn't fetch aspell. %s" % result.output)
+ dl_dir = get_bb_var("DL_DIR")
+ self.assertTrue(os.path.isfile(os.path.join(dl_dir, 'test-aspell.tar.gz')), msg = "File rename failed. No corresponding test-aspell.tar.gz file found under %s" % dl_dir)
+ self.assertTrue(os.path.isfile(os.path.join(dl_dir, 'test-aspell.tar.gz.done')), "File rename failed. No corresponding test-aspell.tar.gz.done file found under %s" % dl_dir)
+
+ @OETestID(1028)
+ def test_environment(self):
+ self.write_config("TEST_ENV=\"localconf\"")
+ result = runCmd('bitbake -e | grep TEST_ENV=')
+ self.assertTrue('localconf' in result.output, msg = "bitbake didn't report any value for TEST_ENV variable. To test, run 'bitbake -e | grep TEST_ENV='")
+
+ @OETestID(1029)
+ def test_dry_run(self):
+ result = runCmd('bitbake -n m4-native')
+ self.assertEqual(0, result.status, "bitbake dry run didn't run as expected. %s" % result.output)
+
+ @OETestID(1030)
+ def test_just_parse(self):
+ result = runCmd('bitbake -p')
+ self.assertEqual(0, result.status, "errors encountered when parsing recipes. %s" % result.output)
+
+ @OETestID(1031)
+ def test_version(self):
+ result = runCmd('bitbake -s | grep wget')
+ find = re.search("wget *:([0-9a-zA-Z\.\-]+)", result.output)
+ self.assertTrue(find, "No version returned for searched recipe. bitbake output: %s" % result.output)
+
+ @OETestID(1032)
+ def test_prefile(self):
+ preconf = os.path.join(self.builddir, 'conf/prefile.conf')
+ self.track_for_cleanup(preconf)
+ ftools.write_file(preconf, "TEST_PREFILE=\"prefile\"")
+ result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
+ self.assertTrue('prefile' in result.output, "Preconfigure file \"prefile.conf\" was not taken into consideration.")
+ self.write_config("TEST_PREFILE=\"localconf\"")
+ result = runCmd('bitbake -r conf/prefile.conf -e | grep TEST_PREFILE=')
+ self.assertTrue('localconf' in result.output, "Preconfigure file \"prefile.conf\"was not taken into consideration.")
+
+ @OETestID(1033)
+ def test_postfile(self):
+ postconf = os.path.join(self.builddir, 'conf/postfile.conf')
+ self.track_for_cleanup(postconf)
+ ftools.write_file(postconf, "TEST_POSTFILE=\"postfile\"")
+ self.write_config("TEST_POSTFILE=\"localconf\"")
+ result = runCmd('bitbake -R conf/postfile.conf -e | grep TEST_POSTFILE=')
+ self.assertTrue('postfile' in result.output, "Postconfigure file \"postfile.conf\" was not taken into consideration.")
+
+ @OETestID(1034)
+ def test_checkuri(self):
+ result = runCmd('bitbake -c checkuri m4')
+ self.assertEqual(0, result.status, msg = "\"checkuri\" task was not executed. bitbake output: %s" % result.output)
+
+ @OETestID(1035)
+ def test_continue(self):
+ self.write_config("""DL_DIR = \"${TOPDIR}/download-selftest\"
+SSTATE_DIR = \"${TOPDIR}/download-selftest\"
+INHERIT_remove = \"report-error\"
+""")
+ self.track_for_cleanup(os.path.join(self.builddir, "download-selftest"))
+ self.write_recipeinc('man-db',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" )
+ runCmd('bitbake -c cleanall man-db xcursor-transparent-theme')
+ result = runCmd('bitbake -c unpack -k man-db xcursor-transparent-theme', ignore_status=True)
+ errorpos = result.output.find('ERROR: Function failed: do_fail_task')
+ themever = re.search("NOTE: recipe xcursor-transparent-theme-(.*?): task do_unpack: Started", result.output)
+ continuepos = result.output.find('NOTE: recipe xcursor-transparent-theme-%s: task do_unpack: Started' % themever.group(1))
+ self.assertLess(errorpos, continuepos, msg = "bitbake didn't continue past the do_fail_task failure. bitbake output: %s" % result.output)
+
+ @OETestID(1119)
+ def test_non_gplv3(self):
+ self.write_config('INCOMPATIBLE_LICENSE = "GPLv3"')
+ result = bitbake('selftest-ed', ignore_status=True)
+ self.assertEqual(result.status, 0, "Bitbake failed, exit code %s, output %s" % (result.status, result.output))
+ lic_dir = get_bb_var('LICENSE_DIRECTORY')
+ self.assertFalse(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPLv3')))
+ self.assertTrue(os.path.isfile(os.path.join(lic_dir, 'selftest-ed/generic_GPLv2')))
+
+ @OETestID(1422)
+ def test_setscene_only(self):
+ """ Bitbake option to restore from sstate only within a build (i.e. execute no real tasks, only setscene)"""
+ test_recipe = 'ed'
+
+ bitbake(test_recipe)
+ bitbake('-c clean %s' % test_recipe)
+ ret = bitbake('--setscene-only %s' % test_recipe)
+
+ tasks = re.findall(r'task\s+(do_\S+):', ret.output)
+
+ for task in tasks:
+ self.assertIn('_setscene', task, 'A task different from _setscene ran: %s.\n'
+ 'Executed tasks were: %s' % (task, str(tasks)))
+
+ @OETestID(1425)
+ def test_bbappend_order(self):
+ """ Bitbake should bbappend to recipe in a predictable order """
+ test_recipe = 'ed'
+ bb_vars = get_bb_vars(['SUMMARY', 'PV'], test_recipe)
+ test_recipe_summary_before = bb_vars['SUMMARY']
+ test_recipe_pv = bb_vars['PV']
+ recipe_append_file = test_recipe + '_' + test_recipe_pv + '.bbappend'
+ expected_recipe_summary = test_recipe_summary_before
+
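+ # Create five bbappends in separate directories, each appending its index to SUMMARY;
+ # layer/filename ordering should apply them in sequence 0..4.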
+ for i in range(5):
+ recipe_append_dir = test_recipe + '_test_' + str(i)
+ recipe_append_path = os.path.join(self.testlayer_path, 'recipes-test', recipe_append_dir, recipe_append_file)
+ os.mkdir(os.path.join(self.testlayer_path, 'recipes-test', recipe_append_dir))
+ feature = 'SUMMARY += "%s"\n' % i
+ ftools.write_file(recipe_append_path, feature)
+ expected_recipe_summary += ' %s' % i
+
+ self.add_command_to_tearDown('rm -rf %s' % os.path.join(self.testlayer_path, 'recipes-test',
+ test_recipe + '_test_*'))
+
+ test_recipe_summary_after = get_bb_var('SUMMARY', test_recipe)
+ self.assertEqual(expected_recipe_summary, test_recipe_summary_after)
diff --git a/poky/meta/lib/oeqa/selftest/cases/buildhistory.py b/poky/meta/lib/oeqa/selftest/cases/buildhistory.py
new file mode 100644
index 000000000..06792d914
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/buildhistory.py
@@ -0,0 +1,46 @@
+import os
+import re
+import datetime
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_vars
+
+
+class BuildhistoryBase(OESelftestTestCase):
+
+ def config_buildhistory(self, tmp_bh_location=False):
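+ # Enable buildhistory (with commits) unless the current configuration already inherits it;
+ # optionally point BUILDHISTORY_DIR at a throwaway directory that is cleaned up afterwards.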
+ bb_vars = get_bb_vars(['USER_CLASSES', 'INHERIT'])
+ if 'buildhistory' not in bb_vars['USER_CLASSES'] and 'buildhistory' not in bb_vars['INHERIT']:
+ add_buildhistory_config = 'INHERIT += "buildhistory"\nBUILDHISTORY_COMMIT = "1"'
+ self.append_config(add_buildhistory_config)
+
+ if tmp_bh_location:
+ # Using a temporary buildhistory location for testing
+ tmp_bh_dir = os.path.join(self.builddir, "tmp_buildhistory_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
+ buildhistory_dir_config = "BUILDHISTORY_DIR = \"%s\"" % tmp_bh_dir
+ self.append_config(buildhistory_dir_config)
+ self.track_for_cleanup(tmp_bh_dir)
+
+ def run_buildhistory_operation(self, target, global_config='', target_config='', change_bh_location=False, expect_error=False, error_regex=''):
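+ # Build the target with the given config fragments applied, then either expect a clean
+ # build or, when expect_error is set, assert that the output matches error_regex.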
+ tmp_bh_location = bool(change_bh_location)
+ self.config_buildhistory(tmp_bh_location)
+
+ self.append_config(global_config)
+ self.append_recipeinc(target, target_config)
+ bitbake("-cclean %s" % target)
+ result = bitbake(target, ignore_status=True)
+ self.remove_config(global_config)
+ self.remove_recipeinc(target, target_config)
+
+ if expect_error:
+ self.assertEqual(result.status, 1, msg="Error expected for global config '%s' and target config '%s'" % (global_config, target_config))
+ search_for_error = re.search(error_regex, result.output)
+ self.assertTrue(search_for_error, msg="Could not find desired error in output: %s (%s)" % (error_regex, result.output))
+ else:
+ self.assertEqual(result.status, 0, msg="Command 'bitbake %s' has failed unexpectedly: %s" % (target, result.output))
+
+ # No tests should be added to the base class.
+ # Please create a new class that inherits from this one, or use one of those already available, for adding tests.
diff --git a/poky/meta/lib/oeqa/selftest/cases/buildoptions.py b/poky/meta/lib/oeqa/selftest/cases/buildoptions.py
new file mode 100644
index 000000000..e60e32dad
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/buildoptions.py
@@ -0,0 +1,180 @@
+import os
+import re
+import glob as g
+import shutil
+import tempfile
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.selftest.cases.buildhistory import BuildhistoryBase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+import oeqa.utils.ftools as ftools
+from oeqa.core.decorator.oeid import OETestID
+
+class ImageOptionsTests(OESelftestTestCase):
+
+ @OETestID(761)
+ def test_incremental_image_generation(self):
+ image_pkgtype = get_bb_var("IMAGE_PKGTYPE")
+ if image_pkgtype != 'rpm':
+ self.skipTest('Not using RPM as main package format')
+ bitbake("-c clean core-image-minimal")
+ self.write_config('INC_RPM_IMAGE_GEN = "1"')
+ self.append_config('IMAGE_FEATURES += "ssh-server-openssh"')
+ bitbake("core-image-minimal")
+ log_data_file = os.path.join(get_bb_var("WORKDIR", "core-image-minimal"), "temp/log.do_rootfs")
+ log_data_created = ftools.read_file(log_data_file)
+ incremental_created = re.search(r"Installing\s*:\s*packagegroup-core-ssh-openssh", log_data_created)
+ self.remove_config('IMAGE_FEATURES += "ssh-server-openssh"')
+ self.assertTrue(incremental_created, msg = "Match failed in:\n%s" % log_data_created)
+ bitbake("core-image-minimal")
+ log_data_removed = ftools.read_file(log_data_file)
+ incremental_removed = re.search(r"Erasing\s*:\s*packagegroup-core-ssh-openssh", log_data_removed)
+ self.assertTrue(incremental_removed, msg = "Match failed in:\n%s" % log_data_removed)
+
+ @OETestID(286)
+ def test_ccache_tool(self):
+ bitbake("ccache-native")
+ bb_vars = get_bb_vars(['SYSROOT_DESTDIR', 'bindir'], 'ccache-native')
+ p = bb_vars['SYSROOT_DESTDIR'] + bb_vars['bindir'] + "/" + "ccache"
+ self.assertTrue(os.path.isfile(p), msg = "No ccache found (%s)" % p)
+ self.write_config('INHERIT += "ccache"')
+ self.add_command_to_tearDown('bitbake -c clean m4')
+ bitbake("m4 -f -c compile")
+ log_compile = os.path.join(get_bb_var("WORKDIR","m4"), "temp/log.do_compile")
+ res = runCmd("grep ccache %s" % log_compile, ignore_status=True)
+ self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile. For further details: %s" % log_compile)
+
+ @OETestID(1435)
+ def test_read_only_image(self):
+ distro_features = get_bb_var('DISTRO_FEATURES')
+ if not ('x11' in distro_features and 'opengl' in distro_features):
+ self.skipTest('core-image-sato requires x11 and opengl in distro features')
+ self.write_config('IMAGE_FEATURES += "read-only-rootfs"')
+ bitbake("core-image-sato")
+ # do_image will fail if there are any pending postinsts
+
+class DiskMonTest(OESelftestTestCase):
+
+ @OETestID(277)
+ def test_stoptask_behavior(self):
+ self.write_config('BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},100000G,100K"')
+ res = bitbake("m4", ignore_status = True)
+ self.assertTrue('ERROR: No new tasks can be executed since the disk space monitor action is "STOPTASKS"!' in res.output, msg = "Tasks should have stopped. Disk monitor is set to STOPTASK: %s" % res.output)
+ self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
+ self.write_config('BB_DISKMON_DIRS = "ABORT,${TMPDIR},100000G,100K"')
+ res = bitbake("m4", ignore_status = True)
+ self.assertTrue('ERROR: Immediately abort since the disk space monitor action is "ABORT"!' in res.output, "Tasks should have been aborted immediately. Disk monitor is set to ABORT: %s" % res.output)
+ self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
+ self.write_config('BB_DISKMON_DIRS = "WARN,${TMPDIR},100000G,100K"')
+ res = bitbake("m4")
+ self.assertTrue('WARNING: The free space' in res.output, msg = "A warning should have been displayed when the disk monitor is set to WARN: %s" % res.output)
+
+class SanityOptionsTest(OESelftestTestCase):
+ def getline(self, res, line):
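+ # Return the first line of the command output that contains the given substring (None if absent).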
+ for l in res.output.split('\n'):
+ if line in l:
+ return l
+
+ @OETestID(927)
+ def test_options_warnqa_errorqa_switch(self):
+
+ self.write_config("INHERIT_remove = \"report-error\"")
+ if "packages-list" not in get_bb_var("ERROR_QA"):
+ self.append_config("ERROR_QA_append = \" packages-list\"")
+
+ self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
+ self.add_command_to_tearDown('bitbake -c clean xcursor-transparent-theme')
+ res = bitbake("xcursor-transparent-theme -f -c package", ignore_status=True)
+ self.delete_recipeinc('xcursor-transparent-theme')
+ line = self.getline(res, "QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors.")
+ self.assertTrue(line and line.startswith("ERROR:"), msg=res.output)
+ self.assertEqual(res.status, 1, msg = "bitbake reported exit code %s. It should have been 1. Bitbake output: %s" % (str(res.status), res.output))
+ self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"')
+ self.append_config('ERROR_QA_remove = "packages-list"')
+ self.append_config('WARN_QA_append = " packages-list"')
+ res = bitbake("xcursor-transparent-theme -f -c package")
+ self.delete_recipeinc('xcursor-transparent-theme')
+ line = self.getline(res, "QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors.")
+ self.assertTrue(line and line.startswith("WARNING:"), msg=res.output)
+
+ @OETestID(1421)
+ def test_layer_without_git_dir(self):
+ """
+ Summary: Test that layer git revisions are displayed and do not fail without git repository
+ Expected: The build to be successful and without "fatal" errors
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ dirpath = tempfile.mkdtemp()
+
+ dummy_layer_name = 'meta-dummy'
+ dummy_layer_path = os.path.join(dirpath, dummy_layer_name)
+ dummy_layer_conf_dir = os.path.join(dummy_layer_path, 'conf')
+ os.makedirs(dummy_layer_conf_dir)
+ dummy_layer_conf_path = os.path.join(dummy_layer_conf_dir, 'layer.conf')
+
+ dummy_layer_content = 'BBPATH .= ":${LAYERDIR}"\n' \
+ 'BBFILES += "${LAYERDIR}/recipes-*/*/*.bb ${LAYERDIR}/recipes-*/*/*.bbappend"\n' \
+ 'BBFILE_COLLECTIONS += "%s"\n' \
+ 'BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' \
+ 'BBFILE_PRIORITY_%s = "6"\n' % (dummy_layer_name, dummy_layer_name, dummy_layer_name)
+
+ ftools.write_file(dummy_layer_conf_path, dummy_layer_content)
+
+ bblayers_conf = 'BBLAYERS += "%s"\n' % dummy_layer_path
+ self.write_bblayers_config(bblayers_conf)
+
+ test_recipe = 'ed'
+
+ ret = bitbake('-n %s' % test_recipe)
+
+ err = 'fatal: Not a git repository'
+
+ shutil.rmtree(dirpath)
+
+ self.assertNotIn(err, ret.output)
+
+
+class BuildhistoryTests(BuildhistoryBase):
+
+ @OETestID(293)
+ def test_buildhistory_basic(self):
+ self.run_buildhistory_operation('xcursor-transparent-theme')
+ self.assertTrue(os.path.isdir(get_bb_var('BUILDHISTORY_DIR')), "buildhistory dir was not created.")
+
+ @OETestID(294)
+ def test_buildhistory_buildtime_pr_backwards(self):
+ target = 'xcursor-transparent-theme'
+ error = "ERROR:.*QA Issue: Package version for package %s went backwards which would break package feeds from (.*-r1.* to .*-r0.*)" % target
+ self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
+ self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True, error_regex=error)
+
+class ArchiverTest(OESelftestTestCase):
+ @OETestID(926)
+ def test_arch_work_dir_and_export_source(self):
+ """
+ Test for archiving the work directory and exporting the source files.
+ """
+ self.write_config("INHERIT += \"archiver\"\nARCHIVER_MODE[src] = \"original\"\nARCHIVER_MODE[srpm] = \"1\"")
+ res = bitbake("xcursor-transparent-theme", ignore_status=True)
+ self.assertEqual(res.status, 0, "\nCouldn't build xcursortransparenttheme.\nbitbake output %s" % res.output)
+ deploy_dir_src = get_bb_var('DEPLOY_DIR_SRC')
+ pkgs_path = g.glob(str(deploy_dir_src) + "/allarch*/xcurs*")
+ src_file_glob = str(pkgs_path[0]) + "/xcursor*.src.rpm"
+ tar_file_glob = str(pkgs_path[0]) + "/xcursor*.tar.gz"
+ self.assertTrue((g.glob(src_file_glob) and g.glob(tar_file_glob)), "Couldn't find .src.rpm and .tar.gz files under %s/allarch*/xcursor*" % deploy_dir_src)
+
+class ToolchainOptions(OESelftestTestCase):
+
+ def test_toolchain_fortran(self):
+ """
+ Test whether we can enable and build fortran and its supporting libraries
+ """
+
+ features = 'FORTRAN_forcevariable = ",fortran"\n'
+ features += 'RUNTIMETARGET_append_pn-gcc-runtime = " libquadmath"\n'
+ self.write_config(features)
+
+ bitbake('gcc-runtime libgfortran')
+
diff --git a/poky/meta/lib/oeqa/selftest/cases/containerimage.py b/poky/meta/lib/oeqa/selftest/cases/containerimage.py
new file mode 100644
index 000000000..99a5cc9e5
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/containerimage.py
@@ -0,0 +1,85 @@
+import os
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, get_bb_vars, runCmd
+from oeqa.core.decorator.oeid import OETestID
+
+# This test builds an image using the "container" IMAGE_FSTYPE, and
+# ensures that the files in the image are only the ones expected.
+#
+# The only package added to the image is container_image_testpkg, which
+# contains one file. However, due to some other things not cleaning up during
+# rootfs creation, there is some cruft. Ideally bugs will be filed and the
+# cruft removed, but for now we whitelist a known set.
+#
+# Also, for performance reasons we only check the cruft when using ipk.
+# When using deb or rpm it is a bit different and we could test all
+# of them, but this test is mainly to catch whether other packages get added
+# by default beyond what is in ROOTFS_BOOTSTRAP_INSTALL.
+#
+class ContainerImageTests(OESelftestTestCase):
+
+ # Verify that when specifying an IMAGE_TYPEDEP_ of the form "foo.bar",
+ # the conversion type bar gets added as a dep as well
+ @OETestID(1619)
+ def test_expected_files(self):
+
+ def get_each_path_part(path):
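+ # Recursively expand a path into tar-style entries for each ancestor directory,
+ # e.g. '/usr/bin' -> ['./usr/bin/', './usr/'].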
+ if path:
+ part = [ '.' + path + '/' ]
+ result = get_each_path_part(path.rsplit('/', 1)[0])
+ if result:
+ return part + result
+ else:
+ return part
+ else:
+ return None
+
+ self.write_config("""PREFERRED_PROVIDER_virtual/kernel = "linux-dummy"
+IMAGE_FSTYPES = "container"
+PACKAGE_CLASSES = "package_ipk"
+IMAGE_FEATURES = ""
+""")
+
+ bbvars = get_bb_vars(['bindir', 'sysconfdir', 'localstatedir',
+ 'DEPLOY_DIR_IMAGE', 'IMAGE_LINK_NAME'],
+ target='container-test-image')
+ expected_files = [
+ './',
+ '.{bindir}/theapp',
+ '.{sysconfdir}/default/',
+ '.{sysconfdir}/default/postinst',
+ '.{sysconfdir}/ld.so.cache',
+ '.{sysconfdir}/timestamp',
+ '.{sysconfdir}/version',
+ './run/',
+ '.{localstatedir}/cache/',
+ '.{localstatedir}/cache/ldconfig/',
+ '.{localstatedir}/cache/ldconfig/aux-cache',
+ '.{localstatedir}/cache/opkg/',
+ '.{localstatedir}/lib/',
+ '.{localstatedir}/lib/opkg/'
+ ]
+
+ expected_files = [ x.format(bindir=bbvars['bindir'],
+ sysconfdir=bbvars['sysconfdir'],
+ localstatedir=bbvars['localstatedir'])
+ for x in expected_files ]
+
+ # Since tar lists all directories individually, make sure each element
+ # from bindir, sysconfdir, etc is added
+ expected_files += get_each_path_part(bbvars['bindir'])
+ expected_files += get_each_path_part(bbvars['sysconfdir'])
+ expected_files += get_each_path_part(bbvars['localstatedir'])
+
+ expected_files = sorted(expected_files)
+
+ # Build the image
+ bitbake('container-test-image')
+
+ image = os.path.join(bbvars['DEPLOY_DIR_IMAGE'],
+ bbvars['IMAGE_LINK_NAME'] + '.tar.bz2')
+
+ # Ensure the files in the image are what we expect
+ result = runCmd("tar tf {} | sort".format(image), shell=True)
+ self.assertEqual(result.output.split('\n'), expected_files)
diff --git a/poky/meta/lib/oeqa/selftest/cases/devtool.py b/poky/meta/lib/oeqa/selftest/cases/devtool.py
new file mode 100644
index 000000000..d5b6a46d4
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/devtool.py
@@ -0,0 +1,1716 @@
+import os
+import re
+import shutil
+import tempfile
+import glob
+import fnmatch
+
+import oeqa.utils.ftools as ftools
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, create_temp_layer
+from oeqa.utils.commands import get_bb_vars, runqemu, get_test_layer
+from oeqa.core.decorator.oeid import OETestID
+
+class DevtoolBase(OESelftestTestCase):
+
+ buffer = True
+
+ def _test_recipe_contents(self, recipefile, checkvars, checkinherits):
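+ # Parse the recipe, following backslash line continuations, and verify that every
+ # variable in checkvars has the expected value (None means the variable must be
+ # absent) and that all names in checkinherits are inherited.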
+ with open(recipefile, 'r') as f:
+ invar = None
+ invalue = None
+ for line in f:
+ var = None
+ if invar:
+ value = line.strip().strip('"')
+ if value.endswith('\\'):
+ invalue += ' ' + value[:-1].strip()
+ continue
+ else:
+ invalue += ' ' + value.strip()
+ var = invar
+ value = invalue
+ invar = None
+ elif '=' in line:
+ splitline = line.split('=', 1)
+ var = splitline[0].rstrip()
+ value = splitline[1].strip().strip('"')
+ if value.endswith('\\'):
+ invalue = value[:-1].strip()
+ invar = var
+ continue
+ elif line.startswith('inherit '):
+ inherits = line.split()[1:]
+
+ if var and var in checkvars:
+ needvalue = checkvars.pop(var)
+ if needvalue is None:
+ self.fail('Variable %s should not appear in recipe, but value is being set to "%s"' % (var, value))
+ if isinstance(needvalue, set):
+ if var == 'LICENSE':
+ value = set(value.split(' & '))
+ else:
+ value = set(value.split())
+ self.assertEqual(value, needvalue, 'values for %s do not match' % var)
+
+
+ missingvars = {}
+ for var, value in checkvars.items():
+ if value is not None:
+ missingvars[var] = value
+ self.assertEqual(missingvars, {}, 'Some expected variables not found in recipe: %s' % missingvars)
+
+ for inherit in checkinherits:
+ self.assertIn(inherit, inherits, 'Missing inherit of %s' % inherit)
+
+ def _check_bbappend(self, testrecipe, recipefile, appenddir):
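+ # Scan 'bitbake-layers show-appends' for bbappends to testrecipe and return the one
+ # created in appenddir, failing if none was created in the test layer.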
+ result = runCmd('bitbake-layers show-appends', cwd=self.builddir)
+ resultlines = result.output.splitlines()
+ inrecipe = False
+ bbappends = []
+ bbappendfile = None
+ for line in resultlines:
+ if inrecipe:
+ if line.startswith(' '):
+ bbappends.append(line.strip())
+ else:
+ break
+ elif line == '%s:' % os.path.basename(recipefile):
+ inrecipe = True
+ self.assertLessEqual(len(bbappends), 2, '%s recipe is being bbappended by another layer - bbappends found:\n %s' % (testrecipe, '\n '.join(bbappends)))
+ for bbappend in bbappends:
+ if bbappend.startswith(appenddir):
+ bbappendfile = bbappend
+ break
+ else:
+ self.fail('bbappend for recipe %s does not seem to be created in test layer' % testrecipe)
+ return bbappendfile
+
+ def _create_temp_layer(self, templayerdir, addlayer, templayername, priority=999, recipepathspec='recipes-*/*'):
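+ # Create a scratch layer via create_temp_layer() and, if requested, register it
+ # with bitbake-layers, scheduling its removal at teardown.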
+ create_temp_layer(templayerdir, templayername, priority, recipepathspec)
+ if addlayer:
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s || true' % templayerdir)
+ result = runCmd('bitbake-layers add-layer %s' % templayerdir, cwd=self.builddir)
+
+ def _process_ls_output(self, output):
+ """
+ Convert ls -l output to a format we can reasonably compare from one context
+ to another (e.g. from host to target)
+ """
+ filelist = []
+ for line in output.splitlines():
+ splitline = line.split()
+ if len(splitline) < 8:
+ self.fail('_process_ls_output: invalid output line: %s' % line)
+ # Remove trailing . on perms
+ splitline[0] = splitline[0].rstrip('.')
+ # Remove leading . on paths
+ splitline[-1] = splitline[-1].lstrip('.')
+ # Drop fields we don't want to compare
+ del splitline[7]
+ del splitline[6]
+ del splitline[5]
+ del splitline[4]
+ del splitline[1]
+ filelist.append(' '.join(splitline))
+ return filelist
+
+
+class DevtoolTests(DevtoolBase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(DevtoolTests, cls).setUpClass()
+ bb_vars = get_bb_vars(['TOPDIR', 'SSTATE_DIR'])
+ cls.original_sstate = bb_vars['SSTATE_DIR']
+ cls.devtool_sstate = os.path.join(bb_vars['TOPDIR'], 'sstate_devtool')
+ cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate
+ cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n'
+ % cls.original_sstate)
+
+ @classmethod
+ def tearDownClass(cls):
+ cls.logger.debug('Deleting devtool sstate cache on %s' % cls.devtool_sstate)
+ runCmd('rm -rf %s' % cls.devtool_sstate)
+ super(DevtoolTests, cls).tearDownClass()
+
+ def setUp(self):
+ """Test case setup function"""
+ super(DevtoolTests, self).setUp()
+ self.workspacedir = os.path.join(self.builddir, 'workspace')
+ self.assertFalse(os.path.exists(self.workspacedir),
+ 'This test cannot be run with a workspace directory '
+ 'under the build directory')
+ self.append_config(self.sstate_conf)
+
+ def _check_src_repo(self, repo_dir):
+ """Check srctree git repository"""
+ self.assertTrue(os.path.isdir(os.path.join(repo_dir, '.git')),
+ 'git repository for external source tree not found')
+ result = runCmd('git status --porcelain', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "",
+ 'Created git repo is not clean')
+ result = runCmd('git symbolic-ref HEAD', cwd=repo_dir)
+ self.assertEqual(result.output.strip(), "refs/heads/devtool",
+ 'Wrong branch in git repo')
+
+ def _check_repo_status(self, repo_dir, expected_status):
+ """Check the worktree status of a repository"""
+ result = runCmd('git status . --porcelain',
+ cwd=repo_dir)
+ for line in result.output.splitlines():
+ for ind, (f_status, fn_re) in enumerate(expected_status):
+ if re.match(fn_re, line[3:]):
+ if f_status != line[:2]:
+ self.fail('Unexpected status in line: %s' % line)
+ expected_status.pop(ind)
+ break
+ else:
+ self.fail('Unexpected modified file in line: %s' % line)
+ if expected_status:
+ self.fail('Missing file changes: %s' % expected_status)
+
+ @OETestID(1158)
+ def test_create_workspace(self):
+ # Check preconditions
+ result = runCmd('bitbake-layers show-layers')
+ self.assertTrue('\nworkspace' not in result.output, 'This test cannot be run with a workspace layer in bblayers.conf')
+ # Try creating a workspace layer with a specific path
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ result = runCmd('devtool create-workspace %s' % tempdir)
+ self.assertTrue(os.path.isfile(os.path.join(tempdir, 'conf', 'layer.conf')), msg = "No workspace created. devtool output: %s " % result.output)
+ result = runCmd('bitbake-layers show-layers')
+ self.assertIn(tempdir, result.output)
+ # Try creating a workspace layer with the default path
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool create-workspace')
+ self.assertTrue(os.path.isfile(os.path.join(self.workspacedir, 'conf', 'layer.conf')), msg = "No workspace created. devtool output: %s " % result.output)
+ result = runCmd('bitbake-layers show-layers')
+ self.assertNotIn(tempdir, result.output)
+ self.assertIn(self.workspacedir, result.output)
+
+ @OETestID(1159)
+ def test_devtool_add(self):
+ # Fetch source
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ pn = 'pv'
+ pv = '1.5.3'
+ url = 'http://www.ivarch.com/programs/sources/pv-1.5.3.tar.bz2'
+ result = runCmd('wget %s' % url, cwd=tempdir)
+ result = runCmd('tar xfv %s' % os.path.basename(url), cwd=tempdir)
+ srcdir = os.path.join(tempdir, '%s-%s' % (pn, pv))
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure')), 'Unable to find configure script in source directory')
+ # Test devtool add
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c cleansstate %s' % pn)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool add %s %s' % (pn, srcdir))
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ recipepath = '%s/recipes/%s/%s_%s.bb' % (self.workspacedir, pn, pn, pv)
+ self.assertIn(recipepath, result.output)
+ self.assertIn(srcdir, result.output)
+ # Test devtool find-recipe
+ result = runCmd('devtool -q find-recipe %s' % pn)
+ self.assertEqual(recipepath, result.output.strip())
+ # Test devtool edit-recipe
+ result = runCmd('VISUAL="echo 123" devtool -q edit-recipe %s' % pn)
+ self.assertEqual('123 %s' % recipepath, result.output.strip())
+ # Clean up anything in the workdir/sysroot/sstate cache (have to do this *after* devtool add since the recipe only exists then)
+ bitbake('%s -c cleansstate' % pn)
+ # Test devtool build
+ result = runCmd('devtool build %s' % pn)
+ bb_vars = get_bb_vars(['D', 'bindir'], pn)
+ installdir = bb_vars['D']
+ self.assertTrue(installdir, 'Could not query installdir variable')
+ bindir = bb_vars['bindir']
+ self.assertTrue(bindir, 'Could not query bindir variable')
+ if bindir[0] == '/':
+ bindir = bindir[1:]
+ self.assertTrue(os.path.isfile(os.path.join(installdir, bindir, 'pv')), 'pv binary not found in D')
+
+ @OETestID(1423)
+ def test_devtool_add_git_local(self):
+ # Fetch source from a remote URL, but do it outside of devtool
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ pn = 'dbus-wait'
+ srcrev = '6cc6077a36fe2648a5f993fe7c16c9632f946517'
+ # We choose an https:// git URL here to check rewriting the URL works
+ url = 'https://git.yoctoproject.org/git/dbus-wait'
+ # Force fetching to "noname" subdir so we verify we're picking up the name from autoconf
+ # instead of the directory name
+ result = runCmd('git clone %s noname' % url, cwd=tempdir)
+ srcdir = os.path.join(tempdir, 'noname')
+ result = runCmd('git reset --hard %s' % srcrev, cwd=srcdir)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure.ac')), 'Unable to find configure script in source directory')
+ # Test devtool add
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # Don't specify a name since we should be able to auto-detect it
+ result = runCmd('devtool add %s' % srcdir)
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
+ # Check the recipe name is correct
+ recipefile = get_bb_var('FILE', pn)
+ self.assertIn('%s_git.bb' % pn, recipefile, 'Recipe file incorrectly named')
+ self.assertIn(recipefile, result.output)
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(pn, result.output)
+ self.assertIn(srcdir, result.output)
+ self.assertIn(recipefile, result.output)
+ checkvars = {}
+ checkvars['LICENSE'] = 'GPLv2'
+ checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'
+ checkvars['S'] = '${WORKDIR}/git'
+ checkvars['PV'] = '0.1+git${SRCPV}'
+ checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/dbus-wait;protocol=https'
+ checkvars['SRCREV'] = srcrev
+ checkvars['DEPENDS'] = set(['dbus'])
+ self._test_recipe_contents(recipefile, checkvars, [])
+
+ @OETestID(1162)
+ def test_devtool_add_library(self):
+ # Fetch source
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ version = '1.1'
+ url = 'https://www.intra2net.com/en/developer/libftdi/download/libftdi1-%s.tar.bz2' % version
+ result = runCmd('wget %s' % url, cwd=tempdir)
+ result = runCmd('tar xfv libftdi1-%s.tar.bz2' % version, cwd=tempdir)
+ srcdir = os.path.join(tempdir, 'libftdi1-%s' % version)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'CMakeLists.txt')), 'Unable to find CMakeLists.txt in source directory')
+ # Test devtool add (and use -V so we test that too)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool add libftdi %s -V %s' % (srcdir, version))
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn('libftdi', result.output)
+ self.assertIn(srcdir, result.output)
+ # Clean up anything in the workdir/sysroot/sstate cache (have to do this *after* devtool add since the recipe only exists then)
+ bitbake('libftdi -c cleansstate')
+ # libftdi's python/CMakeLists.txt is a bit broken, so let's just disable it
+ # There's also the matter of it installing cmake files to a path we don't
+ # normally cover, which triggers the installed-vs-shipped QA test we have
+ # within do_package
+ recipefile = '%s/recipes/libftdi/libftdi_%s.bb' % (self.workspacedir, version)
+ result = runCmd('recipetool setvar %s EXTRA_OECMAKE -- \'-DPYTHON_BINDINGS=OFF -DLIBFTDI_CMAKE_CONFIG_DIR=${datadir}/cmake/Modules\'' % recipefile)
+ with open(recipefile, 'a') as f:
+ f.write('\nFILES_${PN}-dev += "${datadir}/cmake/Modules"\n')
+ # We don't have the ability to pick up this dependency automatically yet...
+ f.write('\nDEPENDS += "libusb1"\n')
+ f.write('\nTESTLIBOUTPUT = "${COMPONENTS_DIR}/${TUNE_PKGARCH}/${PN}/${libdir}"\n')
+ # Test devtool build
+ result = runCmd('devtool build libftdi')
+ bb_vars = get_bb_vars(['TESTLIBOUTPUT', 'STAMP'], 'libftdi')
+ staging_libdir = bb_vars['TESTLIBOUTPUT']
+ self.assertTrue(staging_libdir, 'Could not query TESTLIBOUTPUT variable')
+ self.assertTrue(os.path.isfile(os.path.join(staging_libdir, 'libftdi1.so.2.1.0')), "libftdi binary not found in STAGING_LIBDIR. Output of devtool build libftdi %s" % result.output)
+ # Test devtool reset
+ stampprefix = bb_vars['STAMP']
+ result = runCmd('devtool reset libftdi')
+ result = runCmd('devtool status')
+ self.assertNotIn('libftdi', result.output)
+ self.assertTrue(stampprefix, 'Unable to get STAMP value for recipe libftdi')
+ matches = glob.glob(stampprefix + '*')
+ self.assertFalse(matches, 'Stamp files exist for recipe libftdi that should have been cleaned')
+ self.assertFalse(os.path.isfile(os.path.join(staging_libdir, 'libftdi1.so.2.1.0')), 'libftdi binary still found in STAGING_LIBDIR after cleaning')
+
+ @OETestID(1160)
+ def test_devtool_add_fetch(self):
+ # Fetch source
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ testver = '0.23'
+ url = 'https://pypi.python.org/packages/source/M/MarkupSafe/MarkupSafe-%s.tar.gz' % testver
+ testrecipe = 'python-markupsafe'
+ srcdir = os.path.join(tempdir, testrecipe)
+ # Test devtool add
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c cleansstate %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool add %s %s -f %s' % (testrecipe, srcdir, url))
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. %s' % result.output)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'setup.py')), 'Unable to find setup.py in source directory')
+ self.assertTrue(os.path.isdir(os.path.join(srcdir, '.git')), 'git repository for external source tree was not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(srcdir, result.output)
+ # Check recipe
+ recipefile = get_bb_var('FILE', testrecipe)
+ self.assertIn('%s_%s.bb' % (testrecipe, testver), recipefile, 'Recipe file incorrectly named')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/MarkupSafe-${PV}'
+ checkvars['SRC_URI'] = url.replace(testver, '${PV}')
+ self._test_recipe_contents(recipefile, checkvars, [])
+ # Try with version specified
+ result = runCmd('devtool reset -n %s' % testrecipe)
+ shutil.rmtree(srcdir)
+ fakever = '1.9'
+ result = runCmd('devtool add %s %s -f %s -V %s' % (testrecipe, srcdir, url, fakever))
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'setup.py')), 'Unable to find setup.py in source directory')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(srcdir, result.output)
+ # Check recipe
+ recipefile = get_bb_var('FILE', testrecipe)
+ self.assertIn('%s_%s.bb' % (testrecipe, fakever), recipefile, 'Recipe file incorrectly named')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/MarkupSafe-%s' % testver
+ checkvars['SRC_URI'] = url
+ self._test_recipe_contents(recipefile, checkvars, [])
+
+ @OETestID(1161)
+ def test_devtool_add_fetch_git(self):
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ url = 'gitsm://git.yoctoproject.org/mraa'
+ checkrev = 'ae127b19a50aa54255e4330ccfdd9a5d058e581d'
+ testrecipe = 'mraa'
+ srcdir = os.path.join(tempdir, testrecipe)
+ # Test devtool add
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake -c cleansstate %s' % testrecipe)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool add %s %s -a -f %s' % (testrecipe, srcdir, url))
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created: %s' % result.output)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'imraa', 'imraa.c')), 'Unable to find imraa/imraa.c in source directory')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(srcdir, result.output)
+ # Check recipe
+ recipefile = get_bb_var('FILE', testrecipe)
+ self.assertIn('_git.bb', recipefile, 'Recipe file incorrectly named')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/git'
+ checkvars['PV'] = '1.0+git${SRCPV}'
+ checkvars['SRC_URI'] = url
+ checkvars['SRCREV'] = '${AUTOREV}'
+ self._test_recipe_contents(recipefile, checkvars, [])
+ # Try with revision and version specified
+ result = runCmd('devtool reset -n %s' % testrecipe)
+ shutil.rmtree(srcdir)
+ url_rev = '%s;rev=%s' % (url, checkrev)
+ result = runCmd('devtool add %s %s -f "%s" -V 1.5' % (testrecipe, srcdir, url_rev))
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'imraa', 'imraa.c')), 'Unable to find imraa/imraa.c in source directory')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(srcdir, result.output)
+ # Check recipe
+ recipefile = get_bb_var('FILE', testrecipe)
+ self.assertIn('_git.bb', recipefile, 'Recipe file incorrectly named')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/git'
+ checkvars['PV'] = '1.5+git${SRCPV}'
+ checkvars['SRC_URI'] = url
+ checkvars['SRCREV'] = checkrev
+ self._test_recipe_contents(recipefile, checkvars, [])
+
+ @OETestID(1391)
+ def test_devtool_add_fetch_simple(self):
+ # Fetch source from a remote URL, auto-detecting name
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ testver = '1.6.0'
+ url = 'http://www.ivarch.com/programs/sources/pv-%s.tar.bz2' % testver
+ testrecipe = 'pv'
+ srcdir = os.path.join(self.workspacedir, 'sources', testrecipe)
+ # Test devtool add
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool add %s' % url)
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. %s' % result.output)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure')), 'Unable to find configure script in source directory')
+ self.assertTrue(os.path.isdir(os.path.join(srcdir, '.git')), 'git repository for external source tree was not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(srcdir, result.output)
+ # Check recipe
+ recipefile = get_bb_var('FILE', testrecipe)
+ self.assertIn('%s_%s.bb' % (testrecipe, testver), recipefile, 'Recipe file incorrectly named')
+ checkvars = {}
+ checkvars['S'] = None
+ checkvars['SRC_URI'] = url.replace(testver, '${PV}')
+ self._test_recipe_contents(recipefile, checkvars, [])
+
+ @OETestID(1164)
+ def test_devtool_modify(self):
+ import oe.path
+
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.add_command_to_tearDown('bitbake -c clean mdadm')
+ result = runCmd('devtool modify mdadm -x %s' % tempdir)
+ self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'mdadm_*.bbappend'))
+ self.assertTrue(matches, 'bbappend not created %s' % result.output)
+
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn('mdadm', result.output)
+ self.assertIn(tempdir, result.output)
+ self._check_src_repo(tempdir)
+
+ bitbake('mdadm -C unpack')
+
+ def check_line(checkfile, expected, message, present=True):
+ # Check for $expected, on a line on its own, in checkfile.
+ with open(checkfile, 'r') as f:
+ if present:
+ self.assertIn(expected + '\n', f, message)
+ else:
+ self.assertNotIn(expected + '\n', f, message)
+
+ modfile = os.path.join(tempdir, 'mdadm.8.in')
+ bb_vars = get_bb_vars(['PKGD', 'mandir'], 'mdadm')
+ pkgd = bb_vars['PKGD']
+ self.assertTrue(pkgd, 'Could not query PKGD variable')
+ mandir = bb_vars['mandir']
+ self.assertTrue(mandir, 'Could not query mandir variable')
+ manfile = oe.path.join(pkgd, mandir, 'man8', 'mdadm.8')
+
+ check_line(modfile, 'Linux Software RAID', 'Could not find initial string')
+ check_line(modfile, 'antique pin sardine', 'Unexpectedly found replacement string', present=False)
+
+ result = runCmd("sed -i 's!^Linux Software RAID$!antique pin sardine!' %s" % modfile)
+ check_line(modfile, 'antique pin sardine', 'mdadm.8.in file not modified (sed failed)')
+
+ bitbake('mdadm -c package')
+ check_line(manfile, 'antique pin sardine', 'man file not modified. man searched file path: %s' % manfile)
+
+ result = runCmd('git checkout -- %s' % modfile, cwd=tempdir)
+ check_line(modfile, 'Linux Software RAID', 'man .in file not restored (git failed)')
+
+ bitbake('mdadm -c package')
+ check_line(manfile, 'Linux Software RAID', 'man file not updated. man searched file path: %s' % manfile)
+
+ result = runCmd('devtool reset mdadm')
+ result = runCmd('devtool status')
+ self.assertNotIn('mdadm', result.output)
+
+ @OETestID(1620)
+ def test_devtool_buildclean(self):
+ def assertFile(path, *paths):
+ f = os.path.join(path, *paths)
+ self.assertExists(f)
+ def assertNoFile(path, *paths):
+ f = os.path.join(path, *paths)
+ self.assertNotExists(f)
+
+ # Clean up anything in the workdir/sysroot/sstate cache
+ bitbake('mdadm m4 -c cleansstate')
+ # Try modifying a recipe
+ tempdir_mdadm = tempfile.mkdtemp(prefix='devtoolqa')
+ tempdir_m4 = tempfile.mkdtemp(prefix='devtoolqa')
+ builddir_m4 = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir_mdadm)
+ self.track_for_cleanup(tempdir_m4)
+ self.track_for_cleanup(builddir_m4)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.add_command_to_tearDown('bitbake -c clean mdadm m4')
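+ # Point m4's build at an external directory (B != S) and make do_clean a no-op so the external build dir survives 'bitbake -c clean'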
+ self.write_recipeinc('m4', 'EXTERNALSRC_BUILD = "%s"\ndo_clean() {\n\t:\n}\n' % builddir_m4)
+ try:
+ runCmd('devtool modify mdadm -x %s' % tempdir_mdadm)
+ runCmd('devtool modify m4 -x %s' % tempdir_m4)
+ assertNoFile(tempdir_mdadm, 'mdadm')
+ assertNoFile(builddir_m4, 'src/m4')
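+ # Dump m4's environment first as a sanity check that the recipe still parses with the include written above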
+ result = bitbake('m4 -e')
+ result = bitbake('mdadm m4 -c compile')
+ self.assertEqual(result.status, 0)
+ assertFile(tempdir_mdadm, 'mdadm')
+ assertFile(builddir_m4, 'src/m4')
+ # Check that buildclean task exists and does call make clean
+ bitbake('mdadm m4 -c buildclean')
+ assertNoFile(tempdir_mdadm, 'mdadm')
+ assertNoFile(builddir_m4, 'src/m4')
+ bitbake('mdadm m4 -c compile')
+ assertFile(tempdir_mdadm, 'mdadm')
+ assertFile(builddir_m4, 'src/m4')
+ bitbake('mdadm m4 -c clean')
+ # Check that buildclean task is run before clean for B == S
+ assertNoFile(tempdir_mdadm, 'mdadm')
+ # Check that buildclean task is not run before clean for B != S
+ assertFile(builddir_m4, 'src/m4')
+ finally:
+ self.delete_recipeinc('m4')
+
+ @OETestID(1166)
+ def test_devtool_modify_invalid(self):
+ # Try modifying some recipes
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ testrecipes = 'perf kernel-devsrc package-index core-image-minimal meta-toolchain packagegroup-core-sdk meta-ide-support'.split()
+ # Find actual name of gcc-source since it now includes the version - crude, but good enough for this purpose
+ result = runCmd('bitbake-layers show-recipes gcc-source*')
+ for line in result.output.splitlines():
+ # just match those lines that contain a real target
+ m = re.match('(?P<recipe>^[a-zA-Z0-9.-]+)(?P<colon>:$)', line)
+ if m:
+ testrecipes.append(m.group('recipe'))
+ for testrecipe in testrecipes:
+ # Check it's a valid recipe
+ bitbake('%s -e' % testrecipe)
+ # devtool extract should fail
+ result = runCmd('devtool extract %s %s' % (testrecipe, os.path.join(tempdir, testrecipe)), ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'devtool extract on %s should have failed. devtool output: %s' % (testrecipe, result.output))
+ self.assertNotIn('Fetching ', result.output, 'devtool extract on %s should have errored out before trying to fetch' % testrecipe)
+ self.assertIn('ERROR: ', result.output, 'devtool extract on %s should have given an ERROR' % testrecipe)
+ # devtool modify should fail
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, os.path.join(tempdir, testrecipe)), ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'devtool modify on %s should have failed. devtool output: %s' % (testrecipe, result.output))
+ self.assertIn('ERROR: ', result.output, 'devtool modify on %s should have given an ERROR' % testrecipe)
+
+ @OETestID(1365)
+ def test_devtool_modify_native(self):
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ # Try modifying some recipes
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
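+ # Track that the recipe list covers both mechanisms for native recipes:
+ # BBCLASSEXTEND = "native" and a direct 'inherit native'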
+ bbclassextended = False
+ inheritnative = False
+ testrecipes = 'mtools-native apt-native desktop-file-utils-native'.split()
+ for testrecipe in testrecipes:
+ checkextend = 'native' in (get_bb_var('BBCLASSEXTEND', testrecipe) or '').split()
+ if not bbclassextended:
+ bbclassextended = checkextend
+ if not inheritnative:
+ inheritnative = not checkextend
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, os.path.join(tempdir, testrecipe)))
+ self.assertNotIn('ERROR: ', result.output, 'ERROR in devtool modify output: %s' % result.output)
+ result = runCmd('devtool build %s' % testrecipe)
+ self.assertNotIn('ERROR: ', result.output, 'ERROR in devtool build output: %s' % result.output)
+ result = runCmd('devtool reset %s' % testrecipe)
+ self.assertNotIn('ERROR: ', result.output, 'ERROR in devtool reset output: %s' % result.output)
+
+ self.assertTrue(bbclassextended, 'None of these recipes are BBCLASSEXTENDed to native - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
+ self.assertTrue(inheritnative, 'None of these recipes do "inherit native" - need to adjust testrecipes list: %s' % ', '.join(testrecipes))
+
+
+ @OETestID(1165)
+ def test_devtool_modify_git(self):
+ # Check preconditions
+ testrecipe = 'psplash'
+ src_uri = get_bb_var('SRC_URI', testrecipe)
+ self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
+ # Clean up anything in the workdir/sysroot/sstate cache
+ bitbake('%s -c cleansstate' % testrecipe)
+ # Try modifying a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created. devtool output: %s' % result.output)
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', 'psplash_*.bbappend'))
+ self.assertTrue(matches, 'bbappend not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Try building
+ bitbake(testrecipe)
+
+ @OETestID(1167)
+ def test_devtool_modify_localfiles(self):
+ # Check preconditions
+ testrecipe = 'lighttpd'
+ src_uri = (get_bb_var('SRC_URI', testrecipe) or '').split()
+ foundlocal = False
+ for item in src_uri:
+ if item.startswith('file://') and '.patch' not in item:
+ foundlocal = True
+ break
+ self.assertTrue(foundlocal, 'This test expects the %s recipe to fetch local files and it seems that it no longer does' % testrecipe)
+ # Clean up anything in the workdir/sysroot/sstate cache
+ bitbake('%s -c cleansstate' % testrecipe)
+ # Try modifying a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ self.assertExists(os.path.join(tempdir, 'configure.ac'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % testrecipe))
+ self.assertTrue(matches, 'bbappend not created')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(testrecipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Try building
+ bitbake(testrecipe)
+
+ @OETestID(1378)
+ def test_devtool_modify_virtual(self):
+ # Try modifying a virtual recipe
+ virtrecipe = 'virtual/make'
+ realrecipe = 'make'
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s -x %s' % (virtrecipe, tempdir))
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
+ self.assertExists(os.path.join(self.workspacedir, 'conf', 'layer.conf'), 'Workspace directory not created')
+ matches = glob.glob(os.path.join(self.workspacedir, 'appends', '%s_*.bbappend' % realrecipe))
+ self.assertTrue(matches, 'bbappend not created %s' % result.output)
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertNotIn(virtrecipe, result.output)
+ self.assertIn(realrecipe, result.output)
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # These checks are probably sufficient for a virtual recipe
+
+
+ @OETestID(1169)
+ def test_devtool_update_recipe(self):
+ # Check preconditions
+ testrecipe = 'minicom'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ self.assertNotIn('git://', src_uri, 'This test expects the %s recipe to NOT be a git recipe' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
+ # First, modify a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ # We don't use -x here so that we test the behaviour of devtool modify without it
+ result = runCmd('devtool modify %s %s' % (testrecipe, tempdir))
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Add a couple of commits
+ # FIXME: this only tests adding, need to also test update and remove
+ result = runCmd('echo "Additional line" >> README', cwd=tempdir)
+ result = runCmd('git commit -a -m "Change the README"', cwd=tempdir)
+ result = runCmd('echo "A new file" > devtool-new-file', cwd=tempdir)
+ result = runCmd('git add devtool-new-file', cwd=tempdir)
+ result = runCmd('git commit -m "Add a new file"', cwd=tempdir)
+ self.add_command_to_tearDown('cd %s; rm %s/*.patch; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
+ ('??', '.*/0001-Change-the-README.patch$'),
+ ('??', '.*/0002-Add-a-new-file.patch$')]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ @OETestID(1172)
+ def test_devtool_update_recipe_git(self):
+ # Check preconditions
+ testrecipe = 'mtd-utils'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
+ patches = []
+ for entry in src_uri.split():
+ if entry.startswith('file://') and entry.endswith('.patch'):
+ patches.append(entry[7:].split(';')[0])
+ self.assertGreater(len(patches), 0, 'The %s recipe does not appear to contain any patches, so this test will not be effective' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
+ # First, modify a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Add a couple of commits
+ # FIXME: this only tests adding, need to also test update and remove
+ result = runCmd('echo "# Additional line" >> Makefile.am', cwd=tempdir)
+ result = runCmd('git commit -a -m "Change the Makefile"', cwd=tempdir)
+ result = runCmd('echo "A new file" > devtool-new-file', cwd=tempdir)
+ result = runCmd('git add devtool-new-file', cwd=tempdir)
+ result = runCmd('git commit -m "Add a new file"', cwd=tempdir)
+ self.add_command_to_tearDown('cd %s; rm -rf %s; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe -m srcrev %s' % testrecipe)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile))] + \
+ [(' D', '.*/%s$' % patch) for patch in patches]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ result = runCmd('git diff %s' % os.path.basename(recipefile), cwd=os.path.dirname(recipefile))
+ addlines = ['SRCREV = ".*"', 'SRC_URI = "git://git.infradead.org/mtd-utils.git"']
+ srcurilines = src_uri.split()
+ srcurilines[0] = 'SRC_URI = "' + srcurilines[0]
+ srcurilines.append('"')
+ removelines = ['SRCREV = ".*"'] + srcurilines
+ for line in result.output.splitlines():
+ if line.startswith('+++') or line.startswith('---'):
+ continue
+ elif line.startswith('+'):
+ matched = False
+ for item in addlines:
+ if re.match(item, line[1:].strip()):
+ matched = True
+ break
+ self.assertTrue(matched, 'Unexpected diff add line: %s' % line)
+ elif line.startswith('-'):
+ matched = False
+ for item in removelines:
+ if re.match(item, line[1:].strip()):
+ matched = True
+ break
+ self.assertTrue(matched, 'Unexpected diff remove line: %s' % line)
+ # Now try with auto mode
+ runCmd('cd %s; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ result = runCmd('git rev-parse --show-toplevel', cwd=os.path.dirname(recipefile))
+ topleveldir = result.output.strip()
+ relpatchpath = os.path.join(os.path.relpath(os.path.dirname(recipefile), topleveldir), testrecipe)
+ expected_status = [(' M', os.path.relpath(recipefile, topleveldir)),
+ ('??', '%s/0001-Change-the-Makefile.patch' % relpatchpath),
+ ('??', '%s/0002-Add-a-new-file.patch' % relpatchpath)]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ @OETestID(1170)
+ def test_devtool_update_recipe_append(self):
+ # Check preconditions
+ testrecipe = 'mdadm'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ self.assertNotIn('git://', src_uri, 'This test expects the %s recipe to NOT be a git recipe' % testrecipe)
+ self._check_repo_status(os.path.dirname(recipefile), [])
+ # First, modify a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ tempsrcdir = os.path.join(tempdir, 'source')
+ templayerdir = os.path.join(tempdir, 'layer')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempsrcdir))
+ # Check git repo
+ self._check_src_repo(tempsrcdir)
+ # Add a commit
+ result = runCmd("sed 's!\\(#define VERSION\\W*\"[^\"]*\\)\"!\\1-custom\"!' -i ReadMe.c", cwd=tempsrcdir)
+ result = runCmd('git commit -a -m "Add our custom version"', cwd=tempsrcdir)
+ self.add_command_to_tearDown('cd %s; rm -f %s/*.patch; git checkout .' % (os.path.dirname(recipefile), testrecipe))
+ # Create a temporary layer and add it to bblayers.conf
+ self._create_temp_layer(templayerdir, True, 'selftestupdaterecipe')
+ # Create the bbappend
+ result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
+ self.assertNotIn('WARNING:', result.output)
+ # Check recipe is still clean
+ self._check_repo_status(os.path.dirname(recipefile), [])
+ # Check bbappend was created
+ splitpath = os.path.dirname(recipefile).split(os.sep)
+ appenddir = os.path.join(templayerdir, splitpath[-2], splitpath[-1])
+ bbappendfile = self._check_bbappend(testrecipe, recipefile, appenddir)
+ patchfile = os.path.join(appenddir, testrecipe, '0001-Add-our-custom-version.patch')
+ self.assertExists(patchfile, 'Patch file not created')
+
+ # Check bbappend contents
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://0001-Add-our-custom-version.patch"\n',
+ '\n']
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, f.readlines())
+
+ # Check we can run it again and bbappend isn't modified
+ result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, f.readlines())
+ # Drop new commit and check patch gets deleted
+ result = runCmd('git reset HEAD^', cwd=tempsrcdir)
+ result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
+ self.assertNotExists(patchfile, 'Patch file not deleted')
+ expectedlines2 = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines2, f.readlines())
+ # Put commit back and check we can run it if layer isn't in bblayers.conf
+ os.remove(bbappendfile)
+ result = runCmd('git commit -a -m "Add our custom version"', cwd=tempsrcdir)
+ result = runCmd('bitbake-layers remove-layer %s' % templayerdir, cwd=self.builddir)
+ result = runCmd('devtool update-recipe %s -a %s' % (testrecipe, templayerdir))
+ self.assertIn('WARNING: Specified layer is not currently enabled in bblayers.conf', result.output)
+ self.assertExists(patchfile, 'Patch file not created (with disabled layer)')
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, f.readlines())
+ # Deleting isn't expected to work under these circumstances
+
+ @OETestID(1171)
+ def test_devtool_update_recipe_append_git(self):
+ # Check preconditions
+ testrecipe = 'mtd-utils'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ self.assertIn('git://', src_uri, 'This test expects the %s recipe to be a git recipe' % testrecipe)
+ for entry in src_uri.split():
+ if entry.startswith('git://'):
+ git_uri = entry
+ break
+ self._check_repo_status(os.path.dirname(recipefile), [])
+ # First, modify a recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ tempsrcdir = os.path.join(tempdir, 'source')
+ templayerdir = os.path.join(tempdir, 'layer')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempsrcdir))
+ # Check git repo
+ self._check_src_repo(tempsrcdir)
+ # Add a commit
+ result = runCmd('echo "# Additional line" >> Makefile.am', cwd=tempsrcdir)
+ result = runCmd('git commit -a -m "Change the Makefile"', cwd=tempsrcdir)
+ self.add_command_to_tearDown('cd %s; rm -f %s/*.patch; git checkout .' % (os.path.dirname(recipefile), testrecipe))
+ # Create a temporary layer
+ os.makedirs(os.path.join(templayerdir, 'conf'))
+ with open(os.path.join(templayerdir, 'conf', 'layer.conf'), 'w') as f:
+ f.write('BBPATH .= ":${LAYERDIR}"\n')
+ f.write('BBFILES += "${LAYERDIR}/recipes-*/*/*.bbappend"\n')
+ f.write('BBFILE_COLLECTIONS += "oeselftesttemplayer"\n')
+ f.write('BBFILE_PATTERN_oeselftesttemplayer = "^${LAYERDIR}/"\n')
+ f.write('BBFILE_PRIORITY_oeselftesttemplayer = "999"\n')
+ f.write('BBFILE_PATTERN_IGNORE_EMPTY_oeselftesttemplayer = "1"\n')
+ f.write('LAYERSERIES_COMPAT_oeselftesttemplayer = "${LAYERSERIES_COMPAT_core}"\n')
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s || true' % templayerdir)
+ result = runCmd('bitbake-layers add-layer %s' % templayerdir, cwd=self.builddir)
+ # Create the bbappend
+ result = runCmd('devtool update-recipe -m srcrev %s -a %s' % (testrecipe, templayerdir))
+ self.assertNotIn('WARNING:', result.output)
+ # Check recipe is still clean
+ self._check_repo_status(os.path.dirname(recipefile), [])
+ # Check bbappend was created
+ splitpath = os.path.dirname(recipefile).split(os.sep)
+ appenddir = os.path.join(templayerdir, splitpath[-2], splitpath[-1])
+ bbappendfile = self._check_bbappend(testrecipe, recipefile, appenddir)
+ self.assertNotExists(os.path.join(appenddir, testrecipe), 'Patch directory should not be created')
+
+ # Check bbappend contents
+ result = runCmd('git rev-parse HEAD', cwd=tempsrcdir)
+ expectedlines = set(['SRCREV = "%s"\n' % result.output,
+ '\n',
+ 'SRC_URI = "%s"\n' % git_uri,
+ '\n'])
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, set(f.readlines()))
+
+ # Check we can run it again and bbappend isn't modified
+ result = runCmd('devtool update-recipe -m srcrev %s -a %s' % (testrecipe, templayerdir))
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, set(f.readlines()))
+ # Drop new commit and check SRCREV changes
+ result = runCmd('git reset HEAD^', cwd=tempsrcdir)
+ result = runCmd('devtool update-recipe -m srcrev %s -a %s' % (testrecipe, templayerdir))
+ self.assertNotExists(os.path.join(appenddir, testrecipe), 'Patch directory should not be created')
+ result = runCmd('git rev-parse HEAD', cwd=tempsrcdir)
+ expectedlines = set(['SRCREV = "%s"\n' % result.output,
+ '\n',
+ 'SRC_URI = "%s"\n' % git_uri,
+ '\n'])
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, set(f.readlines()))
+ # Put commit back and check we can run it if layer isn't in bblayers.conf
+ os.remove(bbappendfile)
+ result = runCmd('git commit -a -m "Change the Makefile"', cwd=tempsrcdir)
+ result = runCmd('bitbake-layers remove-layer %s' % templayerdir, cwd=self.builddir)
+ result = runCmd('devtool update-recipe -m srcrev %s -a %s' % (testrecipe, templayerdir))
+ self.assertIn('WARNING: Specified layer is not currently enabled in bblayers.conf', result.output)
+ self.assertNotExists(os.path.join(appenddir, testrecipe), 'Patch directory should not be created')
+ result = runCmd('git rev-parse HEAD', cwd=tempsrcdir)
+ expectedlines = set(['SRCREV = "%s"\n' % result.output,
+ '\n',
+ 'SRC_URI = "%s"\n' % git_uri,
+ '\n'])
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, set(f.readlines()))
+ # Deleting isn't expected to work under these circumstances
+
+ @OETestID(1370)
+ def test_devtool_update_recipe_local_files(self):
+ """Check that local source files are copied over instead of patched"""
+ testrecipe = 'makedevs'
+ recipefile = get_bb_var('FILE', testrecipe)
+ # Setup srctree for modifying the recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be
+ # building it)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Try building just to ensure we haven't broken that
+ bitbake("%s" % testrecipe)
+ # Edit / commit local source
+ runCmd('echo "/* Foobar */" >> oe-local-files/makedevs.c', cwd=tempdir)
+ runCmd('echo "Foo" > oe-local-files/new-local', cwd=tempdir)
+ runCmd('echo "Bar" > new-file', cwd=tempdir)
+ runCmd('git add new-file', cwd=tempdir)
+ runCmd('git commit -m "Add new file"', cwd=tempdir)
+ self.add_command_to_tearDown('cd %s; git clean -fd .; git checkout .' %
+ os.path.dirname(recipefile))
+ runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
+ (' M', '.*/makedevs/makedevs.c$'),
+ ('??', '.*/makedevs/new-local$'),
+ ('??', '.*/makedevs/0001-Add-new-file.patch$')]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ @OETestID(1371)
+ def test_devtool_update_recipe_local_files_2(self):
+ """Check local source files support when oe-local-files is in Git"""
+ testrecipe = 'devtool-test-local'
+ recipefile = get_bb_var('FILE', testrecipe)
+ recipedir = os.path.dirname(recipefile)
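+ # The repo status checks below rely on the recipe directory starting out clean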
+ result = runCmd('git status --porcelain .', cwd=recipedir)
+ if result.output.strip():
+ self.fail('Recipe directory for %s contains uncommitted changes' % testrecipe)
+ # Setup srctree for modifying the recipe
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ # Check git repo
+ self._check_src_repo(tempdir)
+ # Add oe-local-files to Git
+ runCmd('rm oe-local-files/.gitignore', cwd=tempdir)
+ runCmd('git add oe-local-files', cwd=tempdir)
+ runCmd('git commit -m "Add local sources"', cwd=tempdir)
+ # Edit / commit local sources
+ runCmd('echo "# Foobar" >> oe-local-files/file1', cwd=tempdir)
+ runCmd('git commit -am "Edit existing file"', cwd=tempdir)
+ runCmd('git rm oe-local-files/file2', cwd=tempdir)
+ runCmd('git commit -m"Remove file"', cwd=tempdir)
+ runCmd('echo "Foo" > oe-local-files/new-local', cwd=tempdir)
+ runCmd('git add oe-local-files/new-local', cwd=tempdir)
+ runCmd('git commit -m "Add new local file"', cwd=tempdir)
+ runCmd('echo "Gar" > new-file', cwd=tempdir)
+ runCmd('git add new-file', cwd=tempdir)
+ runCmd('git commit -m "Add new file"', cwd=tempdir)
+ self.add_command_to_tearDown('cd %s; git clean -fd .; git checkout .' %
+ os.path.dirname(recipefile))
+ # Checkout unmodified file to working copy -> devtool should still pick
+ # the modified version from HEAD
+ runCmd('git checkout HEAD^ -- oe-local-files/file1', cwd=tempdir)
+ runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s$' % os.path.basename(recipefile)),
+ (' M', '.*/file1$'),
+ (' D', '.*/file2$'),
+ ('??', '.*/new-local$'),
+ ('??', '.*/0001-Add-new-file.patch$')]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ @OETestID(1627)
+ def test_devtool_update_recipe_local_files_3(self):
+ # First, modify the recipe
+ testrecipe = 'devtool-test-localonly'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s' % testrecipe)
+ # Modify one file
+ runCmd('echo "Another line" >> file2', cwd=os.path.join(self.workspacedir, 'sources', testrecipe, 'oe-local-files'))
+ self.add_command_to_tearDown('cd %s; rm %s/*; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s/file2$' % testrecipe)]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ @OETestID(1629)
+ def test_devtool_update_recipe_local_patch_gz(self):
+ # First, modify the recipe
+ testrecipe = 'devtool-test-patch-gz'
+ if get_bb_var('DISTRO') == 'poky-tiny':
+ self.skipTest("The DISTRO 'poky-tiny' does not provide the dependencies needed by %s" % testrecipe)
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s' % testrecipe)
+ # Modify one file
+ srctree = os.path.join(self.workspacedir, 'sources', testrecipe)
+ runCmd('echo "Another line" >> README', cwd=srctree)
+ runCmd('git commit -a --amend --no-edit', cwd=srctree)
+ self.add_command_to_tearDown('cd %s; rm %s/*; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = [(' M', '.*/%s/readme.patch.gz$' % testrecipe)]
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+ patch_gz = os.path.join(os.path.dirname(recipefile), testrecipe, 'readme.patch.gz')
+ result = runCmd('file %s' % patch_gz)
+ if 'gzip compressed data' not in result.output:
+ self.fail('New patch file is not gzipped - file reports:\n%s' % result.output)
+
+ @OETestID(1628)
+ def test_devtool_update_recipe_local_files_subdir(self):
+ # Try devtool update-recipe on a recipe that has a file with subdir= set in
+ # SRC_URI such that it overwrites a file that was in an archive that
+ # was also in SRC_URI
+ # First, modify the recipe
+ testrecipe = 'devtool-test-subdir'
+ bb_vars = get_bb_vars(['FILE', 'SRC_URI'], testrecipe)
+ recipefile = bb_vars['FILE']
+ src_uri = bb_vars['SRC_URI']
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # (don't bother with cleaning the recipe on teardown, we won't be building it)
+ result = runCmd('devtool modify %s' % testrecipe)
+ testfile = os.path.join(self.workspacedir, 'sources', testrecipe, 'testfile')
+ self.assertExists(testfile, 'Extracted source could not be found')
+ with open(testfile, 'r') as f:
+ contents = f.read().rstrip()
+ self.assertEqual(contents, 'Modified version', 'File has apparently not been overwritten as it should have been')
+ # Test devtool update-recipe without modifying any files
+ self.add_command_to_tearDown('cd %s; rm %s/*; git checkout %s %s' % (os.path.dirname(recipefile), testrecipe, testrecipe, os.path.basename(recipefile)))
+ result = runCmd('devtool update-recipe %s' % testrecipe)
+ expected_status = []
+ self._check_repo_status(os.path.dirname(recipefile), expected_status)
+
+ @OETestID(1163)
+ def test_devtool_extract(self):
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ # Try devtool extract
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool extract matchbox-terminal %s' % tempdir)
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
+ self._check_src_repo(tempdir)
+
+ @OETestID(1379)
+ def test_devtool_extract_virtual(self):
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ # Try devtool extract
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool extract virtual/make %s' % tempdir)
+ self.assertExists(os.path.join(tempdir, 'Makefile.am'), 'Extracted source could not be found')
+ self._check_src_repo(tempdir)
+
+ @OETestID(1168)
+ def test_devtool_reset_all(self):
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ testrecipe1 = 'mdadm'
+ testrecipe2 = 'cronie'
+ result = runCmd('devtool modify -x %s %s' % (testrecipe1, os.path.join(tempdir, testrecipe1)))
+ result = runCmd('devtool modify -x %s %s' % (testrecipe2, os.path.join(tempdir, testrecipe2)))
+ result = runCmd('devtool build %s' % testrecipe1)
+ result = runCmd('devtool build %s' % testrecipe2)
+ stampprefix1 = get_bb_var('STAMP', testrecipe1)
+ self.assertTrue(stampprefix1, 'Unable to get STAMP value for recipe %s' % testrecipe1)
+ stampprefix2 = get_bb_var('STAMP', testrecipe2)
+ self.assertTrue(stampprefix2, 'Unable to get STAMP value for recipe %s' % testrecipe2)
+ result = runCmd('devtool reset -a')
+ self.assertIn(testrecipe1, result.output)
+ self.assertIn(testrecipe2, result.output)
+ result = runCmd('devtool status')
+ self.assertNotIn(testrecipe1, result.output)
+ self.assertNotIn(testrecipe2, result.output)
+ matches1 = glob.glob(stampprefix1 + '*')
+ self.assertFalse(matches1, 'Stamp files exist for recipe %s that should have been cleaned' % testrecipe1)
+ matches2 = glob.glob(stampprefix2 + '*')
+ self.assertFalse(matches2, 'Stamp files exist for recipe %s that should have been cleaned' % testrecipe2)
+
+ @OETestID(1272)
+ def test_devtool_deploy_target(self):
+ # NOTE: Whilst this test would seemingly be better placed as a runtime test,
+ # unfortunately the runtime tests run under bitbake and you can't run
+ # devtool within bitbake (since devtool needs to run bitbake itself).
+ # Additionally we are testing build-time functionality as well, so
+ # really this has to be done as an oe-selftest test.
+ #
+ # Check preconditions
+ machine = get_bb_var('MACHINE')
+ if not machine.startswith('qemu'):
+ self.skipTest('This test only works with qemu machines')
+ if not os.path.exists('/etc/runqemu-nosudo'):
+ self.skipTest('You must set up tap devices with scripts/runqemu-gen-tapdevs before running this test')
+ result = runCmd('PATH="$PATH:/sbin:/usr/sbin" ip tuntap show', ignore_status=True)
+ if result.status != 0:
+ result = runCmd('PATH="$PATH:/sbin:/usr/sbin" ifconfig -a', ignore_status=True)
+ if result.status != 0:
+ self.skipTest('Failed to determine if tap devices exist with ifconfig or ip: %s' % result.output)
+ for line in result.output.splitlines():
+ if line.startswith('tap'):
+ break
+ else:
+ self.skipTest('No tap devices found - you must set up tap devices with scripts/runqemu-gen-tapdevs before running this test')
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ # Definitions
+ testrecipe = 'mdadm'
+ testfile = '/sbin/mdadm'
+ testimage = 'oe-selftest-image'
+ testcommand = '/sbin/mdadm --help'
+ # Build an image to run
+ bitbake("%s qemu-native qemu-helper-native" % testimage)
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ self.add_command_to_tearDown('bitbake -c clean %s' % testimage)
+ self.add_command_to_tearDown('rm -f %s/%s*' % (deploy_dir_image, testimage))
+ # Clean recipe so the first deploy will fail
+ bitbake("%s -c clean" % testrecipe)
+ # Try devtool modify
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.add_command_to_tearDown('bitbake -c clean %s' % testrecipe)
+ result = runCmd('devtool modify %s -x %s' % (testrecipe, tempdir))
+ # Test that deploy-target at this point fails (properly)
+ result = runCmd('devtool deploy-target -n %s root@localhost' % testrecipe, ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'devtool deploy-target should have failed, output: %s' % result.output)
+ self.assertNotIn('Traceback', result.output, 'devtool deploy-target should have failed with a proper error, not a traceback, output: %s' % result.output)
+ result = runCmd('devtool build %s' % testrecipe)
+ # First try a dry-run of deploy-target
+ result = runCmd('devtool deploy-target -n %s root@localhost' % testrecipe)
+ self.assertIn(' %s' % testfile, result.output)
+ # Boot the image
+ with runqemu(testimage) as qemu:
+ # Now really test deploy-target
+ result = runCmd('devtool deploy-target -c %s root@%s' % (testrecipe, qemu.ip))
+ # Run a test command to see if it was installed properly
+ sshargs = '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ result = runCmd('ssh %s root@%s %s' % (sshargs, qemu.ip, testcommand))
+ # Check if it deployed all of the files with the right ownership/perms
+ # First look on the host - need to do this under pseudo to get the correct ownership/perms
+ bb_vars = get_bb_vars(['D', 'FAKEROOTENV', 'FAKEROOTCMD'], testrecipe)
+ installdir = bb_vars['D']
+ fakerootenv = bb_vars['FAKEROOTENV']
+ fakerootcmd = bb_vars['FAKEROOTCMD']
+ result = runCmd('%s %s find . -type f -exec ls -l {} \;' % (fakerootenv, fakerootcmd), cwd=installdir)
+ filelist1 = self._process_ls_output(result.output)
+
+ # Now look on the target
+ tempdir2 = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir2)
+ tmpfilelist = os.path.join(tempdir2, 'files.txt')
+ with open(tmpfilelist, 'w') as f:
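+ # Record just the file paths so they can be passed to 'ls -l' on the target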
+ for line in filelist1:
+ splitline = line.split()
+ f.write(splitline[-1] + '\n')
+ result = runCmd('cat %s | ssh -q %s root@%s \'xargs ls -l\'' % (tmpfilelist, sshargs, qemu.ip))
+ filelist2 = self._process_ls_output(result.output)
+ filelist1.sort(key=lambda item: item.split()[-1])
+ filelist2.sort(key=lambda item: item.split()[-1])
+ self.assertEqual(filelist1, filelist2)
+ # Test undeploy-target
+ result = runCmd('devtool undeploy-target -c %s root@%s' % (testrecipe, qemu.ip))
+ result = runCmd('ssh %s root@%s %s' % (sshargs, qemu.ip, testcommand), ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'undeploy-target did not remove command as it should have')
+
+ @OETestID(1366)
+ def test_devtool_build_image(self):
+ """Test devtool build-image plugin"""
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ image = 'core-image-minimal'
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.add_command_to_tearDown('bitbake -c clean %s' % image)
+ bitbake('%s -c clean' % image)
+ # Add target and native recipes to workspace
+ recipes = ['mdadm', 'parted-native']
+ for recipe in recipes:
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.add_command_to_tearDown('bitbake -c clean %s' % recipe)
+ runCmd('devtool modify %s -x %s' % (recipe, tempdir))
+ # Try to build image
+ result = runCmd('devtool build-image %s' % image)
+ self.assertEqual(result.status, 0, 'devtool build-image failed')
+ # Check if image contains expected packages
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ image_link_name = get_bb_var('IMAGE_LINK_NAME', image)
+ reqpkgs = [item for item in recipes if not item.endswith('-native')]
+ with open(os.path.join(deploy_dir_image, image_link_name + '.manifest'), 'r') as f:
+ for line in f:
+ splitval = line.split()
+ if splitval:
+ pkg = splitval[0]
+ if pkg in reqpkgs:
+ reqpkgs.remove(pkg)
+ if reqpkgs:
+ self.fail('The following packages were not present in the image as expected: %s' % ', '.join(reqpkgs))
+
+ @OETestID(1367)
+ def test_devtool_upgrade(self):
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # Check parameters
+ result = runCmd('devtool upgrade -h')
+ for param in 'recipename srctree --version -V --branch -b --keep-temp --no-patch'.split():
+ self.assertIn(param, result.output)
+ # For the moment, we are using a real recipe.
+ recipe = 'devtool-upgrade-test1'
+ version = '1.6.0'
+ oldrecipefile = get_bb_var('FILE', recipe)
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ # Check that recipe is not already under devtool control
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output)
+ # Check upgrade. The code does not check whether the new PV is older or newer than the
+ # current PV, so it may be that we are downgrading instead of upgrading.
+ result = runCmd('devtool upgrade %s %s -V %s' % (recipe, tempdir, version))
+ # Check if srctree at least is populated
+ self.assertTrue(len(os.listdir(tempdir)) > 0, 'srctree (%s) should be populated with new (%s) source code' % (tempdir, version))
+ # Check new recipe subdirectory is present
+ self.assertExists(os.path.join(self.workspacedir, 'recipes', recipe, '%s-%s' % (recipe, version)), 'Recipe folder should exist')
+ # Check new recipe file is present
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', recipe, '%s_%s.bb' % (recipe, version))
+ self.assertExists(newrecipefile, 'Recipe file should exist after upgrade')
+ # Check devtool status and make sure recipe is present
+ result = runCmd('devtool status')
+ self.assertIn(recipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Check recipe got changed as expected
+ with open(oldrecipefile + '.upgraded', 'r') as f:
+ desiredlines = f.readlines()
+ with open(newrecipefile, 'r') as f:
+ newlines = f.readlines()
+ self.assertEqual(desiredlines, newlines)
+ # Check devtool reset recipe
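+ # (-n / --no-clean skips cleaning the recipe output)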
+ result = runCmd('devtool reset %s -n' % recipe)
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output)
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after resetting')
+
+ @OETestID(1433)
+ def test_devtool_upgrade_git(self):
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ recipe = 'devtool-upgrade-test2'
+ commit = '6cc6077a36fe2648a5f993fe7c16c9632f946517'
+ oldrecipefile = get_bb_var('FILE', recipe)
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ # Check that recipe is not already under devtool control
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output)
+ # Check upgrade
+ result = runCmd('devtool upgrade %s %s -S %s' % (recipe, tempdir, commit))
+ # Check if srctree at least is populated
+ self.assertTrue(len(os.listdir(tempdir)) > 0, 'srctree (%s) should be populated with new (%s) source code' % (tempdir, commit))
+ # Check new recipe file is present
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', recipe, os.path.basename(oldrecipefile))
+ self.assertExists(newrecipefile, 'Recipe file should exist after upgrade')
+ # Check devtool status and make sure recipe is present
+ result = runCmd('devtool status')
+ self.assertIn(recipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Check recipe got changed as expected
+ with open(oldrecipefile + '.upgraded', 'r') as f:
+ desiredlines = f.readlines()
+ with open(newrecipefile, 'r') as f:
+ newlines = f.readlines()
+ self.assertEqual(desiredlines, newlines)
+ # Check devtool reset recipe
+ result = runCmd('devtool reset %s -n' % recipe)
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output)
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after resetting')
+
+ @OETestID(1352)
+ def test_devtool_layer_plugins(self):
+ """Test that devtool can use plugins from other layers.
+
+ This test executes the selftest-reverse command from meta-selftest."""
+
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ s = "Microsoft Made No Profit From Anyone's Zunes Yo"
+ result = runCmd("devtool --quiet selftest-reverse \"%s\"" % s)
+ self.assertEqual(result.output, s[::-1])
+
+ def _copy_file_with_cleanup(self, srcfile, basedstdir, *paths):
+ dstdir = basedstdir
+ self.assertExists(dstdir)
+ for p in paths:
+ dstdir = os.path.join(dstdir, p)
+ if not os.path.exists(dstdir):
+ os.makedirs(dstdir)
+ self.track_for_cleanup(dstdir)
+ dstfile = os.path.join(dstdir, os.path.basename(srcfile))
+ if srcfile != dstfile:
+ shutil.copy(srcfile, dstfile)
+ self.track_for_cleanup(dstfile)
+
+ @OETestID(1625)
+ def test_devtool_load_plugin(self):
+ """Test that devtool loads only the first found plugin in BBPATH."""
+
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ devtool = runCmd("which devtool")
+ fromname = runCmd("devtool --quiet pluginfile")
+ srcfile = fromname.output
+ bbpath = get_bb_var('BBPATH')
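+ # Candidate plugin locations: 'lib/devtool' under each BBPATH entry, plus the directory holding the devtool script itself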
+ searchpath = bbpath.split(':') + [os.path.dirname(devtool.output)]
+ plugincontent = []
+ with open(srcfile) as fh:
+ plugincontent = fh.readlines()
+ try:
+ self.assertIn('meta-selftest', srcfile, 'wrong bbpath plugin found')
+ for path in searchpath:
+ self._copy_file_with_cleanup(srcfile, path, 'lib', 'devtool')
+ result = runCmd("devtool --quiet count")
+ self.assertEqual(result.output, '1')
+ result = runCmd("devtool --quiet multiloaded")
+ self.assertEqual(result.output, "no")
+ for path in searchpath:
+ result = runCmd("devtool --quiet bbdir")
+ self.assertEqual(result.output, path)
+ os.unlink(os.path.join(result.output, 'lib', 'devtool', 'bbpath.py'))
+ finally:
+ with open(srcfile, 'w') as fh:
+ fh.writelines(plugincontent)
+
+ def _setup_test_devtool_finish_upgrade(self):
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ # Use a "real" recipe from meta-selftest
+ recipe = 'devtool-upgrade-test1'
+ oldversion = '1.5.3'
+ newversion = '1.6.0'
+ oldrecipefile = get_bb_var('FILE', recipe)
+ recipedir = os.path.dirname(oldrecipefile)
+ result = runCmd('git status --porcelain .', cwd=recipedir)
+ if result.output.strip():
+ self.fail('Recipe directory for %s contains uncommitted changes' % recipe)
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ # Check that recipe is not already under devtool control
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output)
+ # Do the upgrade
+ result = runCmd('devtool upgrade %s %s -V %s' % (recipe, tempdir, newversion))
+ # Check devtool status and make sure recipe is present
+ result = runCmd('devtool status')
+ self.assertIn(recipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Make a change to the source
+ result = runCmd('sed -i \'/^#include "pv.h"/a \\/* Here is a new comment *\\/\' src/pv/number.c', cwd=tempdir)
+ result = runCmd('git status --porcelain', cwd=tempdir)
+ self.assertIn('M src/pv/number.c', result.output)
+ result = runCmd('git commit src/pv/number.c -m "Add a comment to the code"', cwd=tempdir)
+ # Check if patch is there
+ recipedir = os.path.dirname(oldrecipefile)
+ olddir = os.path.join(recipedir, recipe + '-' + oldversion)
+ patchfn = '0001-Add-a-note-line-to-the-quick-reference.patch'
+ self.assertExists(os.path.join(olddir, patchfn), 'Original patch file does not exist')
+ return recipe, oldrecipefile, recipedir, olddir, newversion, patchfn
+
+ @OETestID(1623)
+ def test_devtool_finish_upgrade_origlayer(self):
+ recipe, oldrecipefile, recipedir, olddir, newversion, patchfn = self._setup_test_devtool_finish_upgrade()
+ # Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
+ self.assertIn('/meta-selftest/', recipedir)
+ # Try finish to the original layer
+ self.add_command_to_tearDown('rm -rf %s ; cd %s ; git checkout %s' % (recipedir, os.path.dirname(recipedir), recipedir))
+ result = runCmd('devtool finish %s meta-selftest' % recipe)
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
+ self.assertNotExists(oldrecipefile, 'Old recipe file should have been deleted but wasn\'t')
+ self.assertNotExists(os.path.join(olddir, patchfn), 'Old patch file should have been deleted but wasn\'t')
+ newrecipefile = os.path.join(recipedir, '%s_%s.bb' % (recipe, newversion))
+ newdir = os.path.join(recipedir, recipe + '-' + newversion)
+ self.assertExists(newrecipefile, 'New recipe file should have been copied into existing layer but wasn\'t')
+ self.assertExists(os.path.join(newdir, patchfn), 'Patch file should have been copied into new directory but wasn\'t')
+ self.assertExists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch'), 'New patch file should have been created but wasn\'t')
+
+ @OETestID(1624)
+ def test_devtool_finish_upgrade_otherlayer(self):
+ recipe, oldrecipefile, recipedir, olddir, newversion, patchfn = self._setup_test_devtool_finish_upgrade()
+ # Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
+ self.assertIn('/meta-selftest/', recipedir)
+ # Try finish to a different layer - should create a bbappend
+ # This cleanup isn't strictly necessary, but do it anyway in case the finish step goes wrong and writes to this location
+ self.add_command_to_tearDown('rm -rf %s ; cd %s ; git checkout %s' % (recipedir, os.path.dirname(recipedir), recipedir))
+ oe_core_dir = os.path.join(get_bb_var('COREBASE'), 'meta')
+ newrecipedir = os.path.join(oe_core_dir, 'recipes-test', 'devtool')
+ newrecipefile = os.path.join(newrecipedir, '%s_%s.bb' % (recipe, newversion))
+ self.track_for_cleanup(newrecipedir)
+ result = runCmd('devtool finish %s oe-core' % recipe)
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
+ self.assertExists(oldrecipefile, 'Old recipe file should not have been deleted')
+ self.assertExists(os.path.join(olddir, patchfn), 'Old patch file should not have been deleted')
+ newdir = os.path.join(newrecipedir, recipe + '-' + newversion)
+ self.assertExists(newrecipefile, 'New recipe file should have been copied into existing layer but wasn\'t')
+ self.assertExists(os.path.join(newdir, patchfn), 'Patch file should have been copied into new directory but wasn\'t')
+ self.assertExists(os.path.join(newdir, '0002-Add-a-comment-to-the-code.patch'), 'New patch file should have been created but wasn\'t')
+
+ def _setup_test_devtool_finish_modify(self):
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ # Try modifying a recipe
+ self.track_for_cleanup(self.workspacedir)
+ recipe = 'mdadm'
+ oldrecipefile = get_bb_var('FILE', recipe)
+ recipedir = os.path.dirname(oldrecipefile)
+ result = runCmd('git status --porcelain .', cwd=recipedir)
+ if result.output.strip():
+ self.fail('Recipe directory for %s contains uncommitted changes' % recipe)
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ self.track_for_cleanup(tempdir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ result = runCmd('devtool modify %s %s' % (recipe, tempdir))
+ self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
+ # Test devtool status
+ result = runCmd('devtool status')
+ self.assertIn(recipe, result.output)
+ self.assertIn(tempdir, result.output)
+ # Make a change to the source
+ result = runCmd('sed -i \'/^#include "mdadm.h"/a \\/* Here is a new comment *\\/\' maps.c', cwd=tempdir)
+ result = runCmd('git status --porcelain', cwd=tempdir)
+ self.assertIn('M maps.c', result.output)
+ result = runCmd('git commit maps.c -m "Add a comment to the code"', cwd=tempdir)
+ for entry in os.listdir(recipedir):
+ filesdir = os.path.join(recipedir, entry)
+ if os.path.isdir(filesdir):
+ break
+ else:
+ self.fail('Unable to find recipe files directory for %s' % recipe)
+ return recipe, oldrecipefile, recipedir, filesdir
+
+ @OETestID(1621)
+ def test_devtool_finish_modify_origlayer(self):
+ recipe, oldrecipefile, recipedir, filesdir = self._setup_test_devtool_finish_modify()
+ # Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
+ self.assertIn('/meta/', recipedir)
+ # Try finish to the original layer
+ self.add_command_to_tearDown('rm -rf %s ; cd %s ; git checkout %s' % (recipedir, os.path.dirname(recipedir), recipedir))
+ result = runCmd('devtool finish %s meta' % recipe)
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
+ expected_status = [(' M', '.*/%s$' % os.path.basename(oldrecipefile)),
+ ('??', '.*/.*-Add-a-comment-to-the-code.patch$')]
+ self._check_repo_status(recipedir, expected_status)
+
+ @OETestID(1622)
+ def test_devtool_finish_modify_otherlayer(self):
+ recipe, oldrecipefile, recipedir, filesdir = self._setup_test_devtool_finish_modify()
+ # Ensure the recipe is where we think it should be (so that cleanup doesn't trash things)
+ self.assertIn('/meta/', recipedir)
+ relpth = os.path.relpath(recipedir, os.path.join(get_bb_var('COREBASE'), 'meta'))
+ appenddir = os.path.join(get_test_layer(), relpth)
+ self.track_for_cleanup(appenddir)
+ # Try finish to the original layer
+ self.add_command_to_tearDown('rm -rf %s ; cd %s ; git checkout %s' % (recipedir, os.path.dirname(recipedir), recipedir))
+ result = runCmd('devtool finish %s meta-selftest' % recipe)
+ result = runCmd('devtool status')
+ self.assertNotIn(recipe, result.output, 'Recipe should have been reset by finish but wasn\'t')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipe), 'Recipe directory should not exist after finish')
+ result = runCmd('git status --porcelain .', cwd=recipedir)
+ if result.output.strip():
+ self.fail('Recipe directory for %s contains the following unexpected changes after finish:\n%s' % (recipe, result.output.strip()))
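+ # Derive the wildcard bbappend name (e.g. mdadm_%.bbappend) from the recipe filename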
+ recipefn = os.path.splitext(os.path.basename(oldrecipefile))[0]
+ recipefn = recipefn.split('_')[0] + '_%'
+ appendfile = os.path.join(appenddir, recipefn + '.bbappend')
+ self.assertExists(appendfile, 'bbappend %s should have been created but wasn\'t' % appendfile)
+ newdir = os.path.join(appenddir, recipe)
+ files = os.listdir(newdir)
+ foundpatch = None
+ for fn in files:
+ if fnmatch.fnmatch(fn, '*-Add-a-comment-to-the-code.patch'):
+ foundpatch = fn
+ if not foundpatch:
+ self.fail('No patch file created next to bbappend')
+ files.remove(foundpatch)
+ if files:
+ self.fail('Unexpected file(s) copied next to bbappend: %s' % ', '.join(files))
+
+ @OETestID(1626)
+ def test_devtool_rename(self):
+ # Check preconditions
+ self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory under the build directory')
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+
+ # First run devtool add
+ # We already have this recipe in OE-Core, but that doesn't matter
+ recipename = 'i2c-tools'
+ recipever = '3.1.2'
+ recipefile = os.path.join(self.workspacedir, 'recipes', recipename, '%s_%s.bb' % (recipename, recipever))
+ url = 'http://downloads.yoctoproject.org/mirror/sources/i2c-tools-%s.tar.bz2' % recipever
+ def add_recipe():
+ result = runCmd('devtool add %s' % url)
+ self.assertExists(recipefile, 'Expected recipe file not created')
+ self.assertExists(os.path.join(self.workspacedir, 'sources', recipename), 'Source directory not created')
+ checkvars = {}
+ checkvars['S'] = None
+ checkvars['SRC_URI'] = url.replace(recipever, '${PV}')
+ self._test_recipe_contents(recipefile, checkvars, [])
+ add_recipe()
+ # Now rename it - change both name and version
+ newrecipename = 'mynewrecipe'
+ newrecipever = '456'
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', newrecipename, '%s_%s.bb' % (newrecipename, newrecipever))
+ result = runCmd('devtool rename %s %s -V %s' % (recipename, newrecipename, newrecipever))
+ self.assertExists(newrecipefile, 'Recipe file not renamed')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipename), 'Old recipe directory still exists')
+ newsrctree = os.path.join(self.workspacedir, 'sources', newrecipename)
+ self.assertExists(newsrctree, 'Source directory not renamed')
+ checkvars = {}
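+ # S must still point at the original unpack directory, since the upstream tarball name is unchanged by the rename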
+ checkvars['S'] = '${WORKDIR}/%s-%s' % (recipename, recipever)
+ checkvars['SRC_URI'] = url
+ self._test_recipe_contents(newrecipefile, checkvars, [])
+ # Try again - change just name this time
+ result = runCmd('devtool reset -n %s' % newrecipename)
+ shutil.rmtree(newsrctree)
+ add_recipe()
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', newrecipename, '%s_%s.bb' % (newrecipename, recipever))
+ result = runCmd('devtool rename %s %s' % (recipename, newrecipename))
+ self.assertExists(newrecipefile, 'Recipe file not renamed')
+ self.assertNotExists(os.path.join(self.workspacedir, 'recipes', recipename), 'Old recipe directory still exists')
+ self.assertExists(os.path.join(self.workspacedir, 'sources', newrecipename), 'Source directory not renamed')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/%s-${PV}' % recipename
+ checkvars['SRC_URI'] = url.replace(recipever, '${PV}')
+ self._test_recipe_contents(newrecipefile, checkvars, [])
+ # Try again - change just version this time
+ result = runCmd('devtool reset -n %s' % newrecipename)
+ shutil.rmtree(newsrctree)
+ add_recipe()
+ newrecipefile = os.path.join(self.workspacedir, 'recipes', recipename, '%s_%s.bb' % (recipename, newrecipever))
+ result = runCmd('devtool rename %s -V %s' % (recipename, newrecipever))
+ self.assertExists(newrecipefile, 'Recipe file not renamed')
+ self.assertExists(os.path.join(self.workspacedir, 'sources', recipename), 'Source directory no longer exists')
+ checkvars = {}
+ checkvars['S'] = '${WORKDIR}/${BPN}-%s' % recipever
+ checkvars['SRC_URI'] = url
+ self._test_recipe_contents(newrecipefile, checkvars, [])
+
+ @OETestID(1577)
+ def test_devtool_virtual_kernel_modify(self):
+ """
+ Summary: The purpose of this test case is to verify that
+ devtool modify works correctly when building
+ the kernel.
+ Dependencies: NA
+ Steps: 1. Build kernel with bitbake.
+ 2. Save the config file generated.
+ 3. Clean the environment.
+                  4. Use `devtool modify virtual/kernel` to validate the following:
+                     4.1 The source is checked out correctly.
+                     4.2 The resulting configuration is the same as
+                         the one obtained in step 2.
+                     4.3 The kernel can be built correctly.
+                     4.4 Changes made to the source are reflected in the
+                         subsequent builds.
+                     4.5 Changes to the configuration are reflected in the
+                         subsequent builds.
+        Expected:    devtool modify is able to check out the kernel source,
+                     and modifications to the source and configuration are
+                     reflected when building the kernel.
+ """
+ kernel_provider = get_bb_var('PREFERRED_PROVIDER_virtual/kernel')
+        # Clean up the environment
+ bitbake('%s -c clean' % kernel_provider)
+ tempdir = tempfile.mkdtemp(prefix='devtoolqa')
+ tempdir_cfg = tempfile.mkdtemp(prefix='config_qa')
+ self.track_for_cleanup(tempdir)
+ self.track_for_cleanup(tempdir_cfg)
+ self.track_for_cleanup(self.workspacedir)
+ self.add_command_to_tearDown('bitbake-layers remove-layer */workspace')
+ self.add_command_to_tearDown('bitbake -c clean %s' % kernel_provider)
+ #Step 1
+        #Only the config file is generated here instead of the whole kernel,
+        #to reduce the execution time of this test case.
+ bitbake('%s -c configure' % kernel_provider)
+ bbconfig = os.path.join(get_bb_var('B', kernel_provider),'.config')
+ #Step 2
+ runCmd('cp %s %s' % (bbconfig, tempdir_cfg))
+ self.assertExists(os.path.join(tempdir_cfg, '.config'), 'Could not copy .config file from kernel')
+
+ tmpconfig = os.path.join(tempdir_cfg, '.config')
+ #Step 3
+ bitbake('%s -c clean' % kernel_provider)
+ #Step 4.1
+ runCmd('devtool modify virtual/kernel -x %s' % tempdir)
+ self.assertExists(os.path.join(tempdir, 'Makefile'), 'Extracted source could not be found')
+ #Step 4.2
+ configfile = os.path.join(tempdir,'.config')
+ diff = runCmd('diff %s %s' % (tmpconfig, configfile))
+ self.assertEqual(0,diff.status,'Kernel .config file is not the same using bitbake and devtool')
+ #Step 4.3
+ #NOTE: virtual/kernel is mapped to kernel_provider
+ result = runCmd('devtool build %s' % kernel_provider)
+ self.assertEqual(0,result.status,'Cannot build kernel using `devtool build`')
+ kernelfile = os.path.join(get_bb_var('KBUILD_OUTPUT', kernel_provider), 'vmlinux')
+        self.assertExists(kernelfile, 'Kernel was not built correctly')
+
+ #Modify the kernel source
+ modfile = os.path.join(tempdir,'arch/x86/boot/header.S')
+ modstring = "Use a boot loader. Devtool testing."
+ modapplied = runCmd("sed -i 's/Use a boot loader./%s/' %s" % (modstring, modfile))
+ self.assertEqual(0,modapplied.status,'Modification to %s on kernel source failed' % modfile)
+ #Modify the configuration
+ codeconfigfile = os.path.join(tempdir,'.config.new')
+ modconfopt = "CONFIG_SG_POOL=n"
+ modconf = runCmd("sed -i 's/CONFIG_SG_POOL=y/%s/' %s" % (modconfopt, codeconfigfile))
+ self.assertEqual(0,modconf.status,'Modification to %s failed' % codeconfigfile)
+        #Build the kernel again with devtool
+ rebuild = runCmd('devtool build %s' % kernel_provider)
+        self.assertEqual(0,rebuild.status,'Failed to build kernel after modifying the source and config')
+ #Step 4.4
+ bzimagename = 'bzImage-' + get_bb_var('KERNEL_VERSION_NAME', kernel_provider)
+ bzimagefile = os.path.join(get_bb_var('D', kernel_provider),'boot', bzimagename)
+ checkmodcode = runCmd("grep '%s' %s" % (modstring, bzimagefile))
+        self.assertEqual(0,checkmodcode.status,'Modification to the kernel source is not reflected in the built image')
+ #Step 4.5
+ checkmodconfg = runCmd("grep %s %s" % (modconfopt, codeconfigfile))
+ self.assertEqual(0,checkmodconfg.status,'Modification to configuration file failed')
diff --git a/poky/meta/lib/oeqa/selftest/cases/distrodata.py b/poky/meta/lib/oeqa/selftest/cases/distrodata.py
new file mode 100644
index 000000000..7b2800464
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/distrodata.py
@@ -0,0 +1,99 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+from oeqa.utils.decorators import testcase
+from oeqa.utils.ftools import write_file
+from oeqa.core.decorator.oeid import OETestID
+import os
+
+class Distrodata(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(Distrodata, cls).setUpClass()
+ feature = 'INHERIT += "distrodata"\n'
+ feature += 'LICENSE_FLAGS_WHITELIST += " commercial"\n'
+
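+        # Note: write_config is an instance method; passing cls explicitly
+        # lets setUpClass apply this configuration for all tests in the class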
+ cls.write_config(cls, feature)
+ bitbake('-c checkpkg world')
+
+ @OETestID(1902)
+ def test_checkpkg(self):
+ """
+ Summary: Test that upstream version checks do not regress
+ Expected: Upstream version checks should succeed except for the recipes listed in the exception list.
+ Product: oe-core
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ """
+ checkpkg_result = open(os.path.join(get_bb_var("LOG_DIR"), "checkpkg.csv")).readlines()[1:]
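+        # checkpkg.csv is tab-separated: column 0 holds the package name and
+        # column 11 the upstream version check status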
+ regressed_failures = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] if pkg_data[11] == 'UNKNOWN_BROKEN']
+ regressed_successes = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] if pkg_data[11] == 'KNOWN_BROKEN']
+ msg = ""
+ if len(regressed_failures) > 0:
+ msg = msg + """
+The following packages failed upstream version checks. Please fix them using UPSTREAM_CHECK_URI/UPSTREAM_CHECK_REGEX
+(when using tarballs) or UPSTREAM_CHECK_GITTAGREGEX (when using git). If an upstream version check cannot be performed
+(for example, if upstream does not use git tags), you can set UPSTREAM_VERSION_UNKNOWN to '1' in the recipe to acknowledge
+that the check cannot be performed.
+""" + "\n".join(regressed_failures)
+ if len(regressed_successes) > 0:
+ msg = msg + """
+The following packages have been checked successfully for upstream versions,
+but their recipes claim otherwise by setting UPSTREAM_VERSION_UNKNOWN. Please remove that line from the recipes.
+""" + "\n".join(regressed_successes)
+ self.assertTrue(len(regressed_failures) == 0 and len(regressed_successes) == 0, msg)
+
+ def test_maintainers(self):
+ """
+ Summary: Test that oe-core recipes have a maintainer
+ Expected: All oe-core recipes (except a few special static/testing ones) should have a maintainer listed in maintainers.inc file.
+ Product: oe-core
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ """
+ def is_exception(pkg):
+ exceptions = ["packagegroup-", "initramfs-", "systemd-machine-units", "target-sdk-provides-dummy"]
+ for i in exceptions:
+ if i in pkg:
+ return True
+ return False
+
+ def is_in_oe_core(recipe, recipes):
+ self.assertTrue(recipe in recipes.keys(), "Recipe %s was not in 'bitbake-layers show-recipes' output" %(recipe))
+            self.assertTrue(len(recipes[recipe]) > 0, "'bitbake-layers show-recipes' could not determine what layer(s) recipe %s is in" %(recipe))
+ try:
+ recipes[recipe].index('meta')
+ return True
+ except ValueError:
+ return False
+
+ def get_recipe_layers():
+ import re
+
+ recipes = {}
+            recipe_regex = re.compile(r'^(?P<name>.*):$')
+            layer_regex = re.compile(r'^ (?P<name>\S*) +')
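+            # 'bitbake-layers show-recipes' output looks roughly like:
+            #   somerecipe:
+            #     somelayer  1.0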
+ output = runCmd('bitbake-layers show-recipes').output
+ for line in output.split('\n'):
+ recipe_name_obj = recipe_regex.search(line)
+ if recipe_name_obj:
+ recipe_name = recipe_name_obj.group('name')
+ recipes[recipe_name] = []
+ recipe_layer_obj = layer_regex.search(line)
+ if recipe_layer_obj:
+ layer_name = recipe_layer_obj.group('name')
+ recipes[recipe_name].append(layer_name)
+ return recipes
+
+ checkpkg_result = open(os.path.join(get_bb_var("LOG_DIR"), "checkpkg.csv")).readlines()[1:]
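+        # column 14 of checkpkg.csv holds the assigned maintainer (empty if none)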
+ recipes_layers = get_recipe_layers()
+ no_maintainer_list = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] \
+ if pkg_data[14] == '' and is_in_oe_core(pkg_data[0], recipes_layers) and not is_exception(pkg_data[0])]
+ msg = """
+The following packages do not have a maintainer assigned to them. Please add an entry to meta/conf/distro/include/maintainers.inc file.
+""" + "\n".join(no_maintainer_list)
+ self.assertTrue(len(no_maintainer_list) == 0, msg)
+
+ with_maintainer_list = [pkg_data[0] for pkg_data in [pkg_line.split('\t') for pkg_line in checkpkg_result] \
+ if pkg_data[14] != '' and is_in_oe_core(pkg_data[0], recipes_layers) and not is_exception(pkg_data[0])]
+ msg = """
+The list of oe-core packages with maintainers is empty. This may indicate that the test has regressed and needs fixing.
+"""
+ self.assertTrue(len(with_maintainer_list) > 0, msg)
diff --git a/poky/meta/lib/oeqa/selftest/cases/eSDK.py b/poky/meta/lib/oeqa/selftest/cases/eSDK.py
new file mode 100644
index 000000000..d03188f2f
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/eSDK.py
@@ -0,0 +1,111 @@
+import tempfile
+import shutil
+import os
+import glob
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+
+class oeSDKExtSelfTest(OESelftestTestCase):
+ """
+ # Bugzilla Test Plan: 6033
+ # This code is planned to be part of the automation for eSDK containig
+ # Install libraries and headers, image generation binary feeds, sdk-update.
+ """
+
+ @staticmethod
+ def get_esdk_environment(env_eSDK, tmpdir_eSDKQA):
+        # XXX: for now just use the first environment script found; it still
+        # needs investigating which environment (i586, x86_64) oe-selftest loads
+ pattern = os.path.join(tmpdir_eSDKQA, 'environment-setup-*')
+ return glob.glob(pattern)[0]
+
+ @staticmethod
+ def run_esdk_cmd(env_eSDK, tmpdir_eSDKQA, cmd, postconfig=None, **options):
+ if postconfig:
+ esdk_conf_file = os.path.join(tmpdir_eSDKQA, 'conf', 'local.conf')
+ with open(esdk_conf_file, 'a+') as f:
+ f.write(postconfig)
+ if not options:
+ options = {}
+        if 'shell' not in options:
+ options['shell'] = True
+
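+        # change into the eSDK dir and source its environment script in the
+        # same shell invocation so the command runs with the SDK toolchain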
+ runCmd("cd %s; . %s; %s" % (tmpdir_eSDKQA, env_eSDK, cmd), **options)
+
+ @staticmethod
+ def generate_eSDK(image):
+ pn_task = '%s -c populate_sdk_ext' % image
+ bitbake(pn_task)
+
+ @staticmethod
+ def get_eSDK_toolchain(image):
+ pn_task = '%s -c populate_sdk_ext' % image
+
+ bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAINEXT_OUTPUTNAME'], pn_task)
+ sdk_deploy = bb_vars['SDK_DEPLOY']
+ toolchain_name = bb_vars['TOOLCHAINEXT_OUTPUTNAME']
+ return os.path.join(sdk_deploy, toolchain_name + '.sh')
+
+ @staticmethod
+ def update_configuration(cls, image, tmpdir_eSDKQA, env_eSDK, ext_sdk_path):
+ sstate_dir = os.path.join(os.environ['BUILDDIR'], 'sstate-cache')
+
+ oeSDKExtSelfTest.generate_eSDK(cls.image)
+
+ cls.ext_sdk_path = oeSDKExtSelfTest.get_eSDK_toolchain(cls.image)
+ runCmd("%s -y -d \"%s\"" % (cls.ext_sdk_path, cls.tmpdir_eSDKQA))
+
+ cls.env_eSDK = oeSDKExtSelfTest.get_esdk_environment('', cls.tmpdir_eSDKQA)
+
+        sstate_config = """
+SDK_LOCAL_CONF_WHITELIST = "SSTATE_MIRRORS"
+SSTATE_MIRRORS = "file://.* file://%s/PATH"
+CORE_IMAGE_EXTRA_INSTALL = "perl"
+ """ % sstate_dir
+
+ with open(os.path.join(cls.tmpdir_eSDKQA, 'conf', 'local.conf'), 'a+') as f:
+ f.write(sstate_config)
+
+ @classmethod
+ def setUpClass(cls):
+ super(oeSDKExtSelfTest, cls).setUpClass()
+ cls.tmpdir_eSDKQA = tempfile.mkdtemp(prefix='eSDKQA')
+
+ sstate_dir = get_bb_var('SSTATE_DIR')
+
+ cls.image = 'core-image-minimal'
+ oeSDKExtSelfTest.generate_eSDK(cls.image)
+
+ # Install eSDK
+ cls.ext_sdk_path = oeSDKExtSelfTest.get_eSDK_toolchain(cls.image)
+ runCmd("%s -y -d \"%s\"" % (cls.ext_sdk_path, cls.tmpdir_eSDKQA))
+
+ cls.env_eSDK = oeSDKExtSelfTest.get_esdk_environment('', cls.tmpdir_eSDKQA)
+
+ # Configure eSDK to use sstate mirror from poky
+        sstate_config = """
+SDK_LOCAL_CONF_WHITELIST = "SSTATE_MIRRORS"
+SSTATE_MIRRORS = "file://.* file://%s/PATH"
+ """ % sstate_dir
+ with open(os.path.join(cls.tmpdir_eSDKQA, 'conf', 'local.conf'), 'a+') as f:
+ f.write(sstate_config)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir_eSDKQA, ignore_errors=True)
+ super(oeSDKExtSelfTest, cls).tearDownClass()
+
+ @OETestID(1602)
+ def test_install_libraries_headers(self):
+ pn_sstate = 'bc'
+ bitbake(pn_sstate)
+ cmd = "devtool sdk-install %s " % pn_sstate
+ oeSDKExtSelfTest.run_esdk_cmd(self.env_eSDK, self.tmpdir_eSDKQA, cmd)
+
+ @OETestID(1603)
+ def test_image_generation_binary_feeds(self):
+ image = 'core-image-minimal'
+ cmd = "devtool build-image %s" % image
+ oeSDKExtSelfTest.run_esdk_cmd(self.env_eSDK, self.tmpdir_eSDKQA, cmd)
+
diff --git a/poky/meta/lib/oeqa/selftest/cases/efibootpartition.py b/poky/meta/lib/oeqa/selftest/cases/efibootpartition.py
new file mode 100644
index 000000000..0c8325669
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/efibootpartition.py
@@ -0,0 +1,45 @@
+# Based on runqemu.py test file
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+
+import re
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, runqemu, get_bb_var
+
+class GenericEFITest(OESelftestTestCase):
+ """EFI booting test class"""
+
+ buffer = True
+ cmd_common = "runqemu nographic serial wic ovmf"
+ efi_provider = "systemd-boot"
+ image = "core-image-minimal"
+ machine = "qemux86-64"
+ recipes_built = False
+
+    def setUpLocal(self):
+        super(GenericEFITest, self).setUpLocal()
+
+        self.write_config(
+"""
+EFI_PROVIDER = "%s"
+IMAGE_FSTYPES_pn-%s_append = " wic"
+MACHINE = "%s"
+MACHINE_FEATURES_append = " efi"
+WKS_FILE = "efi-bootdisk.wks.in"
+IMAGE_INSTALL_append = " grub-efi systemd-boot kernel-image-bzimage"
+"""
+% (self.efi_provider, self.image, self.machine))
+        if not GenericEFITest.recipes_built:
+            bitbake("ovmf")
+            bitbake(self.image)
+            # set the flag on the class so the images are only built once
+            GenericEFITest.recipes_built = True
+
+    def test_boot_efi(self):
+ """Test generic boot partition with qemu"""
+ cmd = "%s %s" % (self.cmd_common, self.machine)
+ with runqemu(self.image, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
diff --git a/poky/meta/lib/oeqa/selftest/cases/gotoolchain.py b/poky/meta/lib/oeqa/selftest/cases/gotoolchain.py
new file mode 100644
index 000000000..1e23257f4
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/gotoolchain.py
@@ -0,0 +1,67 @@
+import glob
+import os
+import shutil
+import tempfile
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_vars
+
+
+class oeGoToolchainSelfTest(OESelftestTestCase):
+ """
+ Test cases for OE's Go toolchain
+ """
+
+ @staticmethod
+ def get_sdk_environment(tmpdir_SDKQA):
+ pattern = os.path.join(tmpdir_SDKQA, "environment-setup-*")
+ # FIXME: this is a very naive implementation
+ return glob.glob(pattern)[0]
+
+ @staticmethod
+ def get_sdk_toolchain():
+ bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAIN_OUTPUTNAME'],
+ "meta-go-toolchain")
+ sdk_deploy = bb_vars['SDK_DEPLOY']
+ toolchain_name = bb_vars['TOOLCHAIN_OUTPUTNAME']
+ return os.path.join(sdk_deploy, toolchain_name + ".sh")
+
+ @classmethod
+ def setUpClass(cls):
+ super(oeGoToolchainSelfTest, cls).setUpClass()
+ cls.tmpdir_SDKQA = tempfile.mkdtemp(prefix='SDKQA')
+ cls.go_path = os.path.join(cls.tmpdir_SDKQA, "go")
+ # Build the SDK and locate it in DEPLOYDIR
+ bitbake("meta-go-toolchain")
+ cls.sdk_path = oeGoToolchainSelfTest.get_sdk_toolchain()
+ # Install the SDK into the tmpdir
+ runCmd("sh %s -y -d \"%s\"" % (cls.sdk_path, cls.tmpdir_SDKQA))
+ cls.env_SDK = oeGoToolchainSelfTest.get_sdk_environment(cls.tmpdir_SDKQA)
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
+ super(oeGoToolchainSelfTest, cls).tearDownClass()
+
+ def run_sdk_go_command(self, gocmd):
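+        # build a single shell line: enter the SDK dir, source the
+        # environment script, set GOPATH, then run the cross go tool
+        # (the environment script is assumed to set ${CROSS_COMPILE})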
+ cmd = "cd %s; " % self.tmpdir_SDKQA
+ cmd = cmd + ". %s; " % self.env_SDK
+ cmd = cmd + "export GOPATH=%s; " % self.go_path
+ cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
+ return runCmd(cmd).status
+
+ def test_go_dep_build(self):
+ proj = "github.com/golang"
+ name = "dep"
+ ver = "v0.3.1"
+ archive = ".tar.gz"
+ url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
+
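+        # classic GOPATH layout: the source must live under
+        # $GOPATH/src/github.com/golang/dep for 'go build' to resolve
+        # the import path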
+ runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
+ runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
+ runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
+ runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
+ % (self.tmpdir_SDKQA, self.go_path, proj, name))
+ retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
+ % (proj, name))
+ self.assertEqual(retv, 0,
+ msg="Running go build failed for %s" % name)
diff --git a/poky/meta/lib/oeqa/selftest/cases/image_typedep.py b/poky/meta/lib/oeqa/selftest/cases/image_typedep.py
new file mode 100644
index 000000000..e6788853a
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/image_typedep.py
@@ -0,0 +1,53 @@
+import os
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+from oeqa.core.decorator.oeid import OETestID
+
+class ImageTypeDepTests(OESelftestTestCase):
+
+    # Verify that when specifying an IMAGE_TYPEDEP_ of the form "foo.bar",
+    # the conversion type bar gets added as a dependency as well
+ @OETestID(1633)
+ def test_conversion_typedep_added(self):
+
+ self.write_recipeinc('emptytest', """
+# Try to empty out the default dependency list
+PACKAGE_INSTALL = ""
+DISTRO_EXTRA_RDEPENDS=""
+
+LICENSE = "MIT"
+IMAGE_FSTYPES = "testfstype"
+
+IMAGE_TYPES_MASKED += "testfstype"
+IMAGE_TYPEDEP_testfstype = "tar.bz2"
+
+inherit image
+
+""")
+ # First get the dependency that should exist for bz2, it will look
+ # like CONVERSION_DEPENDS_bz2="somedep"
+ result = bitbake('-e emptytest')
+
+        dep = None
+        for line in result.output.split('\n'):
+            if line.startswith('CONVERSION_DEPENDS_bz2'):
+                dep = line.split('=')[1].strip('"')
+                break
+        self.assertIsNotNone(dep, 'Could not find CONVERSION_DEPENDS_bz2 in bitbake -e output')
+
+ # Now get the dependency task list and check for the expected task
+ # dependency
+ bitbake('-g emptytest')
+
+ taskdependsfile = os.path.join(self.builddir, 'task-depends.dot')
+ dep = dep + ".do_populate_sysroot"
+ depfound = False
+ expectedline = '"emptytest.do_rootfs" -> "{}"'.format(dep)
+
+ with open(taskdependsfile, "r") as f:
+ for line in f:
+ if line.strip() == expectedline:
+ depfound = True
+ break
+
+ if not depfound:
+ raise AssertionError("\"{}\" not found".format(expectedline))
diff --git a/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py b/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
new file mode 100644
index 000000000..09e0b2062
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py
@@ -0,0 +1,240 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, runqemu
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.utils.sshcontrol import SSHControl
+import os
+import json
+
+class ImageFeatures(OESelftestTestCase):
+
+ test_user = 'tester'
+ root_user = 'root'
+
+ buffer = True
+
+ @OETestID(1107)
+ def test_non_root_user_can_connect_via_ssh_without_password(self):
+ """
+ Summary: Check if non root user can connect via ssh without password
+ Expected: 1. Connection to the image via ssh using root user without providing a password should be allowed.
+ 2. Connection to the image via ssh using tester user without providing a password should be allowed.
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh empty-root-password allow-empty-password"\n'
+ features += 'INHERIT += "extrausers"\n'
+ features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
+ self.write_config(features)
+
+ # Build a core-image-minimal
+ bitbake('core-image-minimal')
+
+ with runqemu("core-image-minimal") as qemu:
+ # Attempt to ssh with each user into qemu with empty password
+ for user in [self.root_user, self.test_user]:
+ ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
+ status, output = ssh.run("true")
+ self.assertEqual(status, 0, 'ssh to user %s failed with %s' % (user, output))
+
+ @OETestID(1115)
+ def test_all_users_can_connect_via_ssh_without_password(self):
+ """
+ Summary: Check if all users can connect via ssh without password
+ Expected: 1. Connection to the image via ssh using root user without providing a password should NOT be allowed.
+ 2. Connection to the image via ssh using tester user without providing a password should be allowed.
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ features = 'EXTRA_IMAGE_FEATURES = "ssh-server-openssh allow-empty-password"\n'
+ features += 'INHERIT += "extrausers"\n'
+ features += 'EXTRA_USERS_PARAMS = "useradd -p \'\' {}; usermod -s /bin/sh {};"'.format(self.test_user, self.test_user)
+ self.write_config(features)
+
+ # Build a core-image-minimal
+ bitbake('core-image-minimal')
+
+ with runqemu("core-image-minimal") as qemu:
+ # Attempt to ssh with each user into qemu with empty password
+ for user in [self.root_user, self.test_user]:
+ ssh = SSHControl(ip=qemu.ip, logfile=qemu.sshlog, user=user)
+ status, output = ssh.run("true")
+ if user == 'root':
+ self.assertNotEqual(status, 0, 'ssh to user root was allowed when it should not have been')
+ else:
+ self.assertEqual(status, 0, 'ssh to user tester failed with %s' % output)
+
+
+ @OETestID(1116)
+ def test_clutter_image_can_be_built(self):
+ """
+ Summary: Check if clutter image can be built
+ Expected: 1. core-image-clutter can be built
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ # Build a core-image-clutter
+ bitbake('core-image-clutter')
+
+ @OETestID(1117)
+ def test_wayland_support_in_image(self):
+ """
+ Summary: Check Wayland support in image
+        Expected: 1. Wayland image can be built
+ 2. Wayland feature can be installed
+ Product: oe-core
+ Author: Ionut Chisanovici <ionutx.chisanovici@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ distro_features = get_bb_var('DISTRO_FEATURES')
+ if not ('opengl' in distro_features and 'wayland' in distro_features):
+            self.skipTest('core-image-weston requires both opengl and wayland in DISTRO_FEATURES')
+
+ # Build a core-image-weston
+ bitbake('core-image-weston')
+
+ @OETestID(1497)
+ def test_bmap(self):
+ """
+ Summary: Check bmap support
+        Expected: 1. core-image-minimal can be built with bmap support
+ 2. core-image-minimal is sparse
+ Product: oe-core
+ Author: Ed Bartosh <ed.bartosh@linux.intel.com>
+ """
+
+ features = 'IMAGE_FSTYPES += " ext4 ext4.bmap ext4.bmap.gz"'
+ self.write_config(features)
+
+ image_name = 'core-image-minimal'
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ image_path = os.path.join(deploy_dir_image, "%s.ext4" % link_name)
+ bmap_path = "%s.bmap" % image_path
+ gzip_path = "%s.gz" % bmap_path
+
+ # check if result image, bmap and bmap.gz files are in deploy directory
+ self.assertTrue(os.path.exists(image_path))
+ self.assertTrue(os.path.exists(bmap_path))
+ self.assertTrue(os.path.exists(gzip_path))
+
+ # check if result image is sparse
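+        # (for a sparse file the apparent size exceeds the allocated size;
+        # st_blocks counts 512-byte blocks)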
+ image_stat = os.stat(image_path)
+ self.assertTrue(image_stat.st_size > image_stat.st_blocks * 512)
+
+ # check if the resulting gzip is valid
+ self.assertTrue(runCmd('gzip -t %s' % gzip_path))
+
+ @OETestID(1903)
+ def test_hypervisor_fmts(self):
+ """
+ Summary: Check various hypervisor formats
+ Expected: 1. core-image-minimal can be built with vmdk, vdi and
+ qcow2 support.
+ 2. qemu-img says each image has the expected format
+ Product: oe-core
+ Author: Tom Rini <trini@konsulko.com>
+ """
+
+ img_types = [ 'vmdk', 'vdi', 'qcow2' ]
+ features = ""
+ for itype in img_types:
+ features += 'IMAGE_FSTYPES += "wic.%s"\n' % itype
+ self.write_config(features)
+
+ image_name = 'core-image-minimal'
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ for itype in img_types:
+ image_path = os.path.join(deploy_dir_image, "%s.wic.%s" %
+ (link_name, itype))
+
+ # check if result image file is in deploy directory
+ self.assertTrue(os.path.exists(image_path))
+
+            # check that qemu-img reports the expected image format
+            sysroot = get_bb_var('STAGING_DIR_NATIVE', 'core-image-minimal')
+            result = runCmd('qemu-img info --output json %s' % image_path,
+                            native_sysroot=sysroot)
+            self.assertEqual(json.loads(result.output).get('format'), itype)
+
+ @OETestID(1905)
+ def test_long_chain_conversion(self):
+ """
+ Summary: Check for chaining many CONVERSION_CMDs together
+ Expected: 1. core-image-minimal can be built with
+ ext4.bmap.gz.bz2.lzo.xz.u-boot and also create a
+ sha256sum
+ 2. The above image has a valid sha256sum
+ Product: oe-core
+ Author: Tom Rini <trini@konsulko.com>
+ """
+
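+        # each suffix in the chain applies a further conversion to the output
+        # of the previous one, e.g. the ext4 image is bmap'ed, then gzipped,
+        # then bzip2'ed, and so on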
+ conv = "ext4.bmap.gz.bz2.lzo.xz.u-boot"
+ features = 'IMAGE_FSTYPES += "%s %s.sha256sum"' % (conv, conv)
+ self.write_config(features)
+
+ image_name = 'core-image-minimal'
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ image_path = os.path.join(deploy_dir_image, "%s.%s" %
+ (link_name, conv))
+
+ # check if resulting image is in the deploy directory
+ self.assertTrue(os.path.exists(image_path))
+ self.assertTrue(os.path.exists(image_path + ".sha256sum"))
+
+ # check if the resulting sha256sum agrees
+ self.assertTrue(runCmd('cd %s;sha256sum -c %s.%s.sha256sum' %
+ (deploy_dir_image, link_name, conv)))
+
+ @OETestID(1904)
+ def test_image_fstypes(self):
+ """
+ Summary: Check if image of supported image fstypes can be built
+ Expected: core-image-minimal can be built for various image types
+ Product: oe-core
+ Author: Ed Bartosh <ed.bartosh@linux.intel.com>
+ """
+ image_name = 'core-image-minimal'
+
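+        # leave out image types that need extra setup or tooling not
+        # exercised by this test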
+ img_types = [itype for itype in get_bb_var("IMAGE_TYPES", image_name).split() \
+ if itype not in ('container', 'elf', 'f2fs', 'multiubi')]
+
+ config = 'IMAGE_FSTYPES += "%s"\n'\
+ 'MKUBIFS_ARGS ?= "-m 2048 -e 129024 -c 2047"\n'\
+ 'UBINIZE_ARGS ?= "-m 2048 -p 128KiB -s 512"' % ' '.join(img_types)
+
+ self.write_config(config)
+
+ bitbake(image_name)
+
+ deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ link_name = get_bb_var('IMAGE_LINK_NAME', image_name)
+ for itype in img_types:
+ image_path = os.path.join(deploy_dir_image, "%s.%s" % (link_name, itype))
+ # check if result image is in deploy directory
+ self.assertTrue(os.path.exists(image_path),
+ "%s image %s doesn't exist" % (itype, image_path))
+
+ def test_useradd_static(self):
+ config = """
+USERADDEXTENSION = "useradd-staticids"
+USERADD_ERROR_DYNAMIC = "skip"
+USERADD_UID_TABLES += "files/static-passwd"
+USERADD_GID_TABLES += "files/static-group"
+"""
+ self.write_config(config)
+ bitbake("core-image-base")
diff --git a/poky/meta/lib/oeqa/selftest/cases/layerappend.py b/poky/meta/lib/oeqa/selftest/cases/layerappend.py
new file mode 100644
index 000000000..2fd5cdb0c
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/layerappend.py
@@ -0,0 +1,95 @@
+import os
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+import oeqa.utils.ftools as ftools
+from oeqa.core.decorator.oeid import OETestID
+
+class LayerAppendTests(OESelftestTestCase):
+ layerconf = """
+# We have a conf and classes directory, append to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have a recipes directory, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes*/*.bb ${LAYERDIR}/recipes*/*.bbappend"
+
+BBFILE_COLLECTIONS += "meta-layerINT"
+BBFILE_PATTERN_meta-layerINT := "^${LAYERDIR}/"
+BBFILE_PRIORITY_meta-layerINT = "6"
+"""
+ recipe = """
+LICENSE="CLOSED"
+INHIBIT_DEFAULT_DEPS = "1"
+
+python do_build() {
+ bb.plain('Building ...')
+}
+addtask build
+"""
+ append = """
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI_append = " file://appendtest.txt"
+
+sysroot_stage_all_append() {
+ install -m 644 ${WORKDIR}/appendtest.txt ${SYSROOT_DESTDIR}/
+}
+
+"""
+
+ append2 = """
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI_append = " file://appendtest.txt"
+"""
+ layerappend = ''
+
+ def tearDownLocal(self):
+ if self.layerappend:
+ ftools.remove_from_file(self.builddir + "/conf/bblayers.conf", self.layerappend)
+ super(LayerAppendTests, self).tearDownLocal()
+
+ @OETestID(1196)
+ def test_layer_appends(self):
+ corebase = get_bb_var("COREBASE")
+
+ for l in ["0", "1", "2"]:
+ layer = os.path.join(corebase, "meta-layertest" + l)
+ self.assertFalse(os.path.exists(layer))
+ os.mkdir(layer)
+ os.mkdir(layer + "/conf")
+ with open(layer + "/conf/layer.conf", "w") as f:
+ f.write(self.layerconf.replace("INT", l))
+ os.mkdir(layer + "/recipes-test")
+ if l == "0":
+ with open(layer + "/recipes-test/layerappendtest.bb", "w") as f:
+ f.write(self.recipe)
+ elif l == "1":
+ with open(layer + "/recipes-test/layerappendtest.bbappend", "w") as f:
+ f.write(self.append)
+ os.mkdir(layer + "/recipes-test/layerappendtest")
+ with open(layer + "/recipes-test/layerappendtest/appendtest.txt", "w") as f:
+ f.write("Layer 1 test")
+ elif l == "2":
+ with open(layer + "/recipes-test/layerappendtest.bbappend", "w") as f:
+ f.write(self.append2)
+ os.mkdir(layer + "/recipes-test/layerappendtest")
+ with open(layer + "/recipes-test/layerappendtest/appendtest.txt", "w") as f:
+ f.write("Layer 2 test")
+ self.track_for_cleanup(layer)
+
+ self.layerappend = "BBLAYERS += \"{0}/meta-layertest0 {0}/meta-layertest1 {0}/meta-layertest2\"".format(corebase)
+ ftools.append_file(self.builddir + "/conf/bblayers.conf", self.layerappend)
+ stagingdir = get_bb_var("SYSROOT_DESTDIR", "layerappendtest")
+ bitbake("layerappendtest")
+ data = ftools.read_file(stagingdir + "/appendtest.txt")
+ self.assertEqual(data, "Layer 2 test")
+ os.remove(corebase + "/meta-layertest2/recipes-test/layerappendtest/appendtest.txt")
+ bitbake("layerappendtest")
+ data = ftools.read_file(stagingdir + "/appendtest.txt")
+ self.assertEqual(data, "Layer 1 test")
+ with open(corebase + "/meta-layertest2/recipes-test/layerappendtest/appendtest.txt", "w") as f:
+ f.write("Layer 2 test")
+ bitbake("layerappendtest")
+ data = ftools.read_file(stagingdir + "/appendtest.txt")
+ self.assertEqual(data, "Layer 2 test")
diff --git a/poky/meta/lib/oeqa/selftest/cases/liboe.py b/poky/meta/lib/oeqa/selftest/cases/liboe.py
new file mode 100644
index 000000000..e84609246
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/liboe.py
@@ -0,0 +1,102 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.utils.commands import get_bb_var, get_bb_vars, bitbake, runCmd
+import oe.path
+import os
+import bb.utils
+
+class LibOE(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(LibOE, cls).setUpClass()
+ cls.tmp_dir = get_bb_var('TMPDIR')
+
+ @OETestID(1635)
+ def test_copy_tree_special(self):
+ """
+ Summary: oe.path.copytree() should copy files with special character
+ Expected: 'test file with sp£c!al @nd spaces' should exist in
+ copy destination
+ Product: OE-Core
+ Author: Joshua Lock <joshua.g.lock@intel.com>
+ """
+ testloc = oe.path.join(self.tmp_dir, 'liboetests')
+ src = oe.path.join(testloc, 'src')
+ dst = oe.path.join(testloc, 'dst')
+ bb.utils.mkdirhier(testloc)
+ bb.utils.mkdirhier(src)
+ testfilename = 'test file with sp£c!al @nd spaces'
+
+ # create the test file and copy it
+ open(oe.path.join(src, testfilename), 'w+b').close()
+ oe.path.copytree(src, dst)
+
+ # ensure path exists in dest
+ fileindst = os.path.isfile(oe.path.join(dst, testfilename))
+ self.assertTrue(fileindst, "File with spaces doesn't exist in dst")
+
+ oe.path.remove(testloc)
+
+ @OETestID(1636)
+ def test_copy_tree_xattr(self):
+ """
+ Summary: oe.path.copytree() should preserve xattr on copied files
+ Expected: testxattr file in destination should have user.oetest
+ extended attribute
+ Product: OE-Core
+ Author: Joshua Lock <joshua.g.lock@intel.com>
+ """
+ testloc = oe.path.join(self.tmp_dir, 'liboetests')
+ src = oe.path.join(testloc, 'src')
+ dst = oe.path.join(testloc, 'dst')
+ bb.utils.mkdirhier(testloc)
+ bb.utils.mkdirhier(src)
+ testfilename = 'testxattr'
+
+ # ensure we have setfattr available
+ bitbake("attr-native")
+
+ bb_vars = get_bb_vars(['SYSROOT_DESTDIR', 'bindir'], 'attr-native')
+ destdir = bb_vars['SYSROOT_DESTDIR']
+ bindir = bb_vars['bindir']
+ bindir = destdir + bindir
+
+ # create a file with xattr and copy it
+ open(oe.path.join(src, testfilename), 'w+b').close()
+ runCmd('%s/setfattr -n user.oetest -v "testing liboe" %s' % (bindir, oe.path.join(src, testfilename)))
+ oe.path.copytree(src, dst)
+
+ # ensure file in dest has user.oetest xattr
+ result = runCmd('%s/getfattr -n user.oetest %s' % (bindir, oe.path.join(dst, testfilename)))
+        self.assertIn('user.oetest="testing liboe"', result.output, 'Extended attribute not set in dst')
+
+ oe.path.remove(testloc)
+
+ @OETestID(1634)
+ def test_copy_hardlink_tree_count(self):
+ """
+ Summary: oe.path.copyhardlinktree() shouldn't miss out files
+ Expected: src and dst should have the same number of files
+ Product: OE-Core
+ Author: Joshua Lock <joshua.g.lock@intel.com>
+ """
+ testloc = oe.path.join(self.tmp_dir, 'liboetests')
+ src = oe.path.join(testloc, 'src')
+ dst = oe.path.join(testloc, 'dst')
+ bb.utils.mkdirhier(testloc)
+ bb.utils.mkdirhier(src)
+ testfiles = ['foo', 'bar', '.baz', 'quux']
+
+ def touchfile(tf):
+ open(oe.path.join(src, tf), 'w+b').close()
+
+ for f in testfiles:
+ touchfile(f)
+
+ oe.path.copyhardlinktree(src, dst)
+
+ dstcnt = len(os.listdir(dst))
+ srccnt = len(os.listdir(src))
+        self.assertEqual(dstcnt, len(testfiles), "Number of files in dst (%s) differs from number of files in src (%s)." % (dstcnt, srccnt))
+
+ oe.path.remove(testloc)
diff --git a/poky/meta/lib/oeqa/selftest/cases/lic_checksum.py b/poky/meta/lib/oeqa/selftest/cases/lic_checksum.py
new file mode 100644
index 000000000..37407157c
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/lic_checksum.py
@@ -0,0 +1,35 @@
+import os
+import tempfile
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake
+from oeqa.utils import CommandError
+from oeqa.core.decorator.oeid import OETestID
+
+class LicenseTests(OESelftestTestCase):
+
+ # Verify that changing a license file that has an absolute path causes
+ # the license qa to fail due to a mismatched md5sum.
+ @OETestID(1197)
+ def test_nonmatching_checksum(self):
+ bitbake_cmd = '-c populate_lic emptytest'
+ error_msg = 'emptytest: The new md5 checksum is 8d777f385d3dfec8815d20f7496026dc'
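+        # flow: first populate the license with a matching checksum, then
+        # modify the license file and expect the md5 mismatch error above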
+
+ lic_file, lic_path = tempfile.mkstemp()
+ os.close(lic_file)
+ self.track_for_cleanup(lic_path)
+
+ self.write_recipeinc('emptytest', """
+INHIBIT_DEFAULT_DEPS = "1"
+LIC_FILES_CHKSUM = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
+SRC_URI = "file://%s;md5=d41d8cd98f00b204e9800998ecf8427e"
+""" % (lic_path, lic_path))
+ result = bitbake(bitbake_cmd)
+
+ with open(lic_path, "w") as f:
+ f.write("data")
+
+ self.write_config("INHERIT_remove = \"report-error\"")
+ result = bitbake(bitbake_cmd, ignore_status=True)
+ if error_msg not in result.output:
+ raise AssertionError(result.output)
diff --git a/poky/meta/lib/oeqa/selftest/cases/manifest.py b/poky/meta/lib/oeqa/selftest/cases/manifest.py
new file mode 100644
index 000000000..146071934
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/manifest.py
@@ -0,0 +1,166 @@
+import os
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import get_bb_var, get_bb_vars, bitbake
+from oeqa.core.decorator.oeid import OETestID
+
+class ManifestEntry:
+ '''A manifest item of a collection able to list missing packages'''
+ def __init__(self, entry):
+ self.file = entry
+ self.missing = []
+
+class VerifyManifest(OESelftestTestCase):
+ '''Tests for the manifest files and contents of an image'''
+
+ @classmethod
+ def check_manifest_entries(self, manifest, path):
+ manifest_errors = []
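+        # each manifest line names a package; check that a matching entry
+        # exists under 'path' and record any that are missing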
+ try:
+ with open(manifest, "r") as mfile:
+ for line in mfile:
+ manifest_entry = os.path.join(path, line.split()[0])
+ self.logger.debug("{}: looking for {}"\
+ .format(self.classname, manifest_entry))
+ if not os.path.isfile(manifest_entry):
+ manifest_errors.append(manifest_entry)
+ self.logger.debug("{}: {} not found"\
+ .format(self.classname, manifest_entry))
+ except OSError as e:
+ self.logger.debug("{}: checking of {} failed"\
+ .format(self.classname, manifest))
+ raise e
+
+ return manifest_errors
+
+ #this will possibly move from here
+ @classmethod
+ def get_dir_from_bb_var(self, bb_var, target = None):
+        target = self.buildtarget if target is None else target
+        directory = get_bb_var(bb_var, target)
+ if not directory or not os.path.isdir(directory):
+ self.logger.debug("{}: {} points to {} when target = {}"\
+ .format(self.classname, bb_var, directory, target))
+ raise OSError
+ return directory
+
+ @classmethod
+ def setUpClass(self):
+
+ super(VerifyManifest, self).setUpClass()
+ self.buildtarget = 'core-image-minimal'
+ self.classname = 'VerifyManifest'
+
+ self.logger.info("{}: doing bitbake {} as a prerequisite of the test"\
+ .format(self.classname, self.buildtarget))
+ if bitbake(self.buildtarget).status:
+ self.logger.debug("{} Failed to setup {}"\
+ .format(self.classname, self.buildtarget))
+ self.skipTest("{}: Cannot setup testing scenario"\
+ .format(self.classname))
+
+ @OETestID(1380)
+ def test_SDK_manifest_entries(self):
+ '''Verifying the SDK manifest entries exist, this may take a build'''
+
+ # the setup should bitbake core-image-minimal and here it is required
+ # to do an additional setup for the sdk
+ sdktask = '-c populate_sdk'
+ bbargs = sdktask + ' ' + self.buildtarget
+ self.logger.debug("{}: doing bitbake {} as a prerequisite of the test"\
+ .format(self.classname, bbargs))
+ if bitbake(bbargs).status:
+ self.logger.debug("{} Failed to bitbake {}"\
+ .format(self.classname, bbargs))
+ self.skipTest("{}: Cannot setup testing scenario"\
+ .format(self.classname))
+
+
+        # use distinct dicts; chained assignment would make the names share
+        # a single dict object
+        pkgdata_dir = {}
+        reverse_dir = {}
+        mfilename = {}
+        mpath = {}
+        m_entry = {}
+        # get manifest location based on target to query about
+        d_target = dict(target = self.buildtarget,
+                        host = 'nativesdk-packagegroup-sdk-host')
+ try:
+ mdir = self.get_dir_from_bb_var('SDK_DEPLOY', self.buildtarget)
+ for k in d_target.keys():
+ bb_vars = get_bb_vars(['SDK_NAME', 'SDK_VERSION'], self.buildtarget)
+ mfilename[k] = "{}-toolchain-{}.{}.manifest".format(
+ bb_vars['SDK_NAME'],
+ bb_vars['SDK_VERSION'],
+ k)
+ mpath[k] = os.path.join(mdir, mfilename[k])
+ if not os.path.isfile(mpath[k]):
+ self.logger.debug("{}: {} does not exist".format(
+ self.classname, mpath[k]))
+ raise IOError
+ m_entry[k] = ManifestEntry(mpath[k])
+
+ pkgdata_dir[k] = self.get_dir_from_bb_var('PKGDATA_DIR',
+ d_target[k])
+ reverse_dir[k] = os.path.join(pkgdata_dir[k],
+ 'runtime-reverse')
+ if not os.path.exists(reverse_dir[k]):
+ self.logger.debug("{}: {} does not exist".format(
+ self.classname, reverse_dir[k]))
+ raise IOError
+ except OSError:
+ raise self.skipTest("{}: Error in obtaining manifest dirs"\
+ .format(self.classname))
+ except IOError:
+ msg = "{}: Error cannot find manifests in the specified dir:\n{}"\
+ .format(self.classname, mdir)
+ self.fail(msg)
+
+ for k in d_target.keys():
+ self.logger.debug("{}: Check manifest {}".format(
+ self.classname, m_entry[k].file))
+
+ m_entry[k].missing = self.check_manifest_entries(\
+ m_entry[k].file,reverse_dir[k])
+ if m_entry[k].missing:
+ msg = '{}: {} Error has the following missing entries'\
+ .format(self.classname, m_entry[k].file)
+ logmsg = msg+':\n'+'\n'.join(m_entry[k].missing)
+ self.logger.debug(logmsg)
+ self.logger.info(msg)
+ self.fail(logmsg)
+
+ @OETestID(1381)
+ def test_image_manifest_entries(self):
+ '''Verifying the image manifest entries exist'''
+
+ # get manifest location based on target to query about
+ try:
+ mdir = self.get_dir_from_bb_var('DEPLOY_DIR_IMAGE',
+ self.buildtarget)
+ mfilename = get_bb_var("IMAGE_LINK_NAME", self.buildtarget)\
+ + ".manifest"
+ mpath = os.path.join(mdir, mfilename)
+ if not os.path.isfile(mpath): raise IOError
+ m_entry = ManifestEntry(mpath)
+
+ pkgdata_dir = {}
+ pkgdata_dir = self.get_dir_from_bb_var('PKGDATA_DIR',
+ self.buildtarget)
+ revdir = os.path.join(pkgdata_dir, 'runtime-reverse')
+ if not os.path.exists(revdir): raise IOError
+ except OSError:
+ raise self.skipTest("{}: Error in obtaining manifest dirs"\
+ .format(self.classname))
+ except IOError:
+ msg = "{}: Error cannot find manifests in dir:\n{}"\
+ .format(self.classname, mdir)
+ self.fail(msg)
+
+ self.logger.debug("{}: Check manifest {}"\
+ .format(self.classname, m_entry.file))
+ m_entry.missing = self.check_manifest_entries(\
+ m_entry.file, revdir)
+ if m_entry.missing:
+ msg = '{}: {} Error has the following missing entries'\
+ .format(self.classname, m_entry.file)
+ logmsg = msg+':\n'+'\n'.join(m_entry.missing)
+ self.logger.debug(logmsg)
+ self.logger.info(msg)
+ self.fail(logmsg)
diff --git a/poky/meta/lib/oeqa/selftest/cases/meta_ide.py b/poky/meta/lib/oeqa/selftest/cases/meta_ide.py
new file mode 100644
index 000000000..5df9d3ed9
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/meta_ide.py
@@ -0,0 +1,49 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.sdk.utils.sdkbuildproject import SDKBuildProject
+from oeqa.utils.commands import bitbake, get_bb_vars, runCmd
+from oeqa.core.decorator.oeid import OETestID
+import tempfile
+import shutil
+
+class MetaIDE(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(MetaIDE, cls).setUpClass()
+ bitbake('meta-ide-support')
+ bb_vars = get_bb_vars(['MULTIMACH_TARGET_SYS', 'TMPDIR', 'COREBASE'])
+ cls.environment_script = 'environment-setup-%s' % bb_vars['MULTIMACH_TARGET_SYS']
+ cls.tmpdir = bb_vars['TMPDIR']
+ cls.environment_script_path = '%s/%s' % (cls.tmpdir, cls.environment_script)
+ cls.corebasedir = bb_vars['COREBASE']
+ cls.tmpdir_metaideQA = tempfile.mkdtemp(prefix='metaide')
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.tmpdir_metaideQA, ignore_errors=True)
+ super(MetaIDE, cls).tearDownClass()
+
+ @OETestID(1982)
+ def test_meta_ide_had_installed_meta_ide_support(self):
+ self.assertExists(self.environment_script_path)
+
+ @OETestID(1983)
+ def test_meta_ide_can_compile_c_program(self):
+ runCmd('cp %s/test.c %s' % (self.tc.files_dir, self.tmpdir_metaideQA))
+ runCmd("cd %s; . %s; $CC test.c -lm" % (self.tmpdir_metaideQA, self.environment_script_path))
+ compiled_file = '%s/a.out' % self.tmpdir_metaideQA
+ self.assertExists(compiled_file)
+
+ @OETestID(1984)
+ def test_meta_ide_can_build_cpio_project(self):
+ dl_dir = self.td.get('DL_DIR', None)
+ self.project = SDKBuildProject(self.tmpdir_metaideQA + "/cpio/", self.environment_script_path,
+ "https://ftp.gnu.org/gnu/cpio/cpio-2.12.tar.gz",
+ self.tmpdir_metaideQA, self.td['DATETIME'], dl_dir=dl_dir)
+ self.project.download_archive()
+ self.assertEqual(self.project.run_configure(), 0,
+ msg="Running configure failed")
+ self.assertEqual(self.project.run_make(), 0,
+ msg="Running make failed")
+ self.assertEqual(self.project.run_install(), 0,
+ msg="Running make install failed")
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/__init__.py b/poky/meta/lib/oeqa/selftest/cases/oelib/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/__init__.py
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py b/poky/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py
new file mode 100644
index 000000000..08675fd82
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/buildhistory.py
@@ -0,0 +1,99 @@
+import os
+from oeqa.selftest.case import OESelftestTestCase
+import tempfile
+from oeqa.utils.commands import get_bb_var
+from oeqa.core.decorator.oeid import OETestID
+
+class TestBlobParsing(OESelftestTestCase):
+
+ def setUp(self):
+ import time
+ self.repo_path = tempfile.mkdtemp(prefix='selftest-buildhistory',
+ dir=get_bb_var('TOPDIR'))
+
+ try:
+ from git import Repo
+ self.repo = Repo.init(self.repo_path)
+ except ImportError:
+ self.skipTest('Python module GitPython is not present')
+
+ self.test_file = "test"
+ self.var_map = {}
+
+ def tearDown(self):
+ import shutil
+ shutil.rmtree(self.repo_path)
+
+    def commit_vars(self, to_add=None, to_remove=None, msg="A commit message"):
+        to_add = to_add or {}
+        to_remove = to_remove or []
+        if len(to_add) == 0 and len(to_remove) == 0:
+            return
+
+        for k in to_remove:
+            self.var_map.pop(k, None)
+        for k in to_add:
+            self.var_map[k] = to_add[k]
+
+ with open(os.path.join(self.repo_path, self.test_file), 'w') as repo_file:
+ for k in self.var_map:
+ repo_file.write("%s = %s\n" % (k, self.var_map[k]))
+
+ self.repo.git.add("--all")
+ self.repo.git.commit(message=msg)
+
+ @OETestID(1859)
+ def test_blob_to_dict(self):
+ """
+        Test conversion of git blobs to dictionary
+ """
+ from oe.buildhistory_analysis import blob_to_dict
+ valuesmap = { "foo" : "1", "bar" : "2" }
+ self.commit_vars(to_add = valuesmap)
+
+ blob = self.repo.head.commit.tree.blobs[0]
+ self.assertEqual(valuesmap, blob_to_dict(blob),
+ "commit was not translated correctly to dictionary")
+
+ @OETestID(1860)
+ def test_compare_dict_blobs(self):
+ """
+        Test comparison of dictionaries extracted from git blobs
+ """
+ from oe.buildhistory_analysis import compare_dict_blobs
+
+ changesmap = { "foo-2" : ("2", "8"), "bar" : ("","4"), "bar-2" : ("","5")}
+
+ self.commit_vars(to_add = { "foo" : "1", "foo-2" : "2", "foo-3" : "3" })
+ blob1 = self.repo.heads.master.commit.tree.blobs[0]
+
+ self.commit_vars(to_add = { "foo-2" : "8", "bar" : "4", "bar-2" : "5" })
+ blob2 = self.repo.heads.master.commit.tree.blobs[0]
+
+ change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
+ blob1, blob2, False, False)
+
+ var_changes = { x.fieldname : (x.oldvalue, x.newvalue) for x in change_records}
+ self.assertEqual(changesmap, var_changes, "Changes not reported correctly")
+
+ @OETestID(1861)
+ def test_compare_dict_blobs_default(self):
+ """
+        Test default values for comparison of git blob dictionaries
+ """
+ from oe.buildhistory_analysis import compare_dict_blobs
+ defaultmap = { x : ("default", "1") for x in ["PKG", "PKGE", "PKGV", "PKGR"]}
+
+ self.commit_vars(to_add = { "foo" : "1" })
+ blob1 = self.repo.heads.master.commit.tree.blobs[0]
+
+ self.commit_vars(to_add = { "PKG" : "1", "PKGE" : "1", "PKGV" : "1", "PKGR" : "1" })
+ blob2 = self.repo.heads.master.commit.tree.blobs[0]
+
+ change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
+ blob1, blob2, False, False)
+
+ var_changes = {}
+ for x in change_records:
+ oldvalue = "default" if ("default" in x.oldvalue) else x.oldvalue
+ var_changes[x.fieldname] = (oldvalue, x.newvalue)
+
+ self.assertEqual(defaultmap, var_changes, "Defaults not set properly")
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/elf.py b/poky/meta/lib/oeqa/selftest/cases/oelib/elf.py
new file mode 100644
index 000000000..74ee6a11c
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/elf.py
@@ -0,0 +1,21 @@
+from unittest.case import TestCase
+import oe.qa
+
+class TestElf(TestCase):
+ def test_machine_name(self):
+ """
+ Test elf_machine_to_string()
+ """
+ self.assertEqual(oe.qa.elf_machine_to_string(0x02), "SPARC")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x03), "x86")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x08), "MIPS")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x14), "PowerPC")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x28), "ARM")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x2A), "SuperH")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x32), "IA-64")
+ self.assertEqual(oe.qa.elf_machine_to_string(0x3E), "x86-64")
+ self.assertEqual(oe.qa.elf_machine_to_string(0xB7), "AArch64")
+
+ self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unknown (0)")
+ self.assertEqual(oe.qa.elf_machine_to_string(0xDEADBEEF), "Unknown (3735928559)")
+ self.assertEqual(oe.qa.elf_machine_to_string("foobar"), "Unknown ('foobar')")
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/license.py b/poky/meta/lib/oeqa/selftest/cases/oelib/license.py
new file mode 100644
index 000000000..d7f91fb2f
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/license.py
@@ -0,0 +1,99 @@
+from unittest.case import TestCase
+import oe.license
+
+class SeenVisitor(oe.license.LicenseVisitor):
+ def __init__(self):
+ self.seen = []
+ oe.license.LicenseVisitor.__init__(self)
+
+ def visit_Str(self, node):
+ self.seen.append(node.s)
+
+class TestSingleLicense(TestCase):
+ licenses = [
+ "GPLv2",
+ "LGPL-2.0",
+ "Artistic",
+ "MIT",
+ "GPLv3+",
+ "FOO_BAR",
+ ]
+ invalid_licenses = ["GPL/BSD"]
+
+ @staticmethod
+ def parse(licensestr):
+ visitor = SeenVisitor()
+ visitor.visit_string(licensestr)
+ return visitor.seen
+
+ def test_single_licenses(self):
+ for license in self.licenses:
+ licenses = self.parse(license)
+ self.assertListEqual(licenses, [license])
+
+ def test_invalid_licenses(self):
+ for license in self.invalid_licenses:
+ with self.assertRaises(oe.license.InvalidLicense) as cm:
+ self.parse(license)
+ self.assertEqual(cm.exception.license, license)
+
+class TestSimpleCombinations(TestCase):
+ tests = {
+ "FOO&BAR": ["FOO", "BAR"],
+ "BAZ & MOO": ["BAZ", "MOO"],
+ "ALPHA|BETA": ["ALPHA"],
+ "BAZ&MOO|FOO": ["FOO"],
+ "FOO&BAR|BAZ": ["FOO", "BAR"],
+ }
+ preferred = ["ALPHA", "FOO", "BAR"]
+
+ def test_tests(self):
+ def choose(a, b):
+ if all(lic in self.preferred for lic in b):
+ return b
+ else:
+ return a
+
+ for license, expected in self.tests.items():
+ licenses = oe.license.flattened_licenses(license, choose)
+ self.assertListEqual(licenses, expected)
+
+class TestComplexCombinations(TestSimpleCombinations):
+ tests = {
+ "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
+ "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
+ "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
+ "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
+ }
+ preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
+
+class TestIsIncluded(TestCase):
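+    # keys: (license expression, include list, exclude list); values:
+    # [expected is_included() verdict, expected resulting license list]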
+ tests = {
+ ("FOO | BAR", None, None):
+ [True, ["FOO"]],
+ ("FOO | BAR", None, "FOO"):
+ [True, ["BAR"]],
+ ("FOO | BAR", "BAR", None):
+ [True, ["BAR"]],
+ ("FOO | BAR & FOOBAR", "*BAR", None):
+ [True, ["BAR", "FOOBAR"]],
+ ("FOO | BAR & FOOBAR", None, "FOO*"):
+ [False, ["FOOBAR"]],
+ ("(FOO | BAR) & FOOBAR | BARFOO", None, "FOO"):
+ [True, ["BAR", "FOOBAR"]],
+ ("(FOO | BAR) & FOOBAR | BAZ & MOO & BARFOO", None, "FOO"):
+ [True, ["BAZ", "MOO", "BARFOO"]],
+ ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, None):
+ [True, ["GPL-3.0", "GPL-2.0", "LGPL-2.1"]],
+ ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, "GPL-3.0"):
+ [True, ["Proprietary"]],
+ ("GPL-3.0 & GPL-2.0 & LGPL-2.1 | Proprietary", None, "GPL-3.0 Proprietary"):
+ [False, ["GPL-3.0"]]
+ }
+
+ def test_tests(self):
+ for args, expected in self.tests.items():
+ is_included, licenses = oe.license.is_included(
+ args[0], (args[1] or '').split(), (args[2] or '').split())
+ self.assertEqual(is_included, expected[0])
+ self.assertListEqual(licenses, expected[1])
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/path.py b/poky/meta/lib/oeqa/selftest/cases/oelib/path.py
new file mode 100644
index 000000000..75a27c06f
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/path.py
@@ -0,0 +1,89 @@
+from unittest.case import TestCase
+import oe, oe.path
+import tempfile
+import os
+import errno
+import shutil
+
+class TestRealPath(TestCase):
+ DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
+ FILES = [ "etc/passwd", "b/file" ]
+ LINKS = [
+ ( "bin", "/usr/bin", "/usr/bin" ),
+ ( "binX", "usr/binX", "/usr/binX" ),
+ ( "c", "broken", "/broken" ),
+ ( "etc/passwd-1", "passwd", "/etc/passwd" ),
+ ( "etc/passwd-2", "passwd-1", "/etc/passwd" ),
+ ( "etc/passwd-3", "/etc/passwd-1", "/etc/passwd" ),
+ ( "etc/shadow-1", "/etc/shadow", "/etc/shadow" ),
+ ( "etc/shadow-2", "/etc/shadow-1", "/etc/shadow" ),
+ ( "prog-A", "bin/prog-A", "/usr/bin/prog-A" ),
+ ( "prog-B", "/bin/prog-B", "/usr/bin/prog-B" ),
+ ( "usr/bin/prog-C", "../../sbin/prog-C", "/sbin/prog-C" ),
+ ( "usr/bin/prog-D", "/sbin/prog-D", "/sbin/prog-D" ),
+ ( "usr/binX/prog-E", "../sbin/prog-E", None ),
+ ( "usr/bin/prog-F", "../../../sbin/prog-F", "/sbin/prog-F" ),
+ ( "loop", "a/loop", None ),
+ ( "a/loop", "../loop", None ),
+ ( "b/test", "file/foo", "/b/file/foo" ),
+ ]
+
+ LINKS_PHYS = [
+ ( "./", "/", "" ),
+ ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
+ ]
+
+ EXCEPTIONS = [
+ ( "loop", errno.ELOOP ),
+ ( "b/test", errno.ENOENT ),
+ ]
+
+ def __del__(self):
+ try:
+ #os.system("tree -F %s" % self.tmpdir)
+ shutil.rmtree(self.tmpdir)
+        except OSError:
+            pass
+
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
+ self.root = os.path.join(self.tmpdir, "R")
+
+ os.mkdir(os.path.join(self.tmpdir, "_real"))
+ os.symlink("_real", self.root)
+
+ for d in self.DIRS:
+ os.mkdir(os.path.join(self.root, d))
+ for f in self.FILES:
+            open(os.path.join(self.root, f), "w").close()
+ for l in self.LINKS:
+ os.symlink(l[1], os.path.join(self.root, l[0]))
+
+ def __realpath(self, file, use_physdir, assume_dir = True):
+ return oe.path.realpath(os.path.join(self.root, file), self.root,
+ use_physdir, assume_dir = assume_dir)
+
+ def test_norm(self):
+ for l in self.LINKS:
+            if l[2] is None:
+ continue
+
+ target_p = self.__realpath(l[0], True)
+ target_l = self.__realpath(l[0], False)
+
+ if l[2] != False:
+ self.assertEqual(target_p, target_l)
+ self.assertEqual(l[2], target_p[len(self.root):])
+
+ def test_phys(self):
+ for l in self.LINKS_PHYS:
+ target_p = self.__realpath(l[0], True)
+ target_l = self.__realpath(l[0], False)
+
+ self.assertEqual(l[1], target_p[len(self.root):])
+ self.assertEqual(l[2], target_l[len(self.root):])
+
+ def test_loop(self):
+ for e in self.EXCEPTIONS:
+ self.assertRaisesRegex(OSError, r'\[Errno %u\]' % e[1],
+ self.__realpath, e[0], False, False)
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/types.py b/poky/meta/lib/oeqa/selftest/cases/oelib/types.py
new file mode 100644
index 000000000..6b53aa64e
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/types.py
@@ -0,0 +1,50 @@
+from unittest.case import TestCase
+from oe.maketype import create
+
+class TestBooleanType(TestCase):
+ def test_invalid(self):
+ self.assertRaises(ValueError, create, '', 'boolean')
+ self.assertRaises(ValueError, create, 'foo', 'boolean')
+ self.assertRaises(TypeError, create, object(), 'boolean')
+
+ def test_true(self):
+ self.assertTrue(create('y', 'boolean'))
+ self.assertTrue(create('yes', 'boolean'))
+ self.assertTrue(create('1', 'boolean'))
+ self.assertTrue(create('t', 'boolean'))
+ self.assertTrue(create('true', 'boolean'))
+ self.assertTrue(create('TRUE', 'boolean'))
+ self.assertTrue(create('truE', 'boolean'))
+
+ def test_false(self):
+ self.assertFalse(create('n', 'boolean'))
+ self.assertFalse(create('no', 'boolean'))
+ self.assertFalse(create('0', 'boolean'))
+ self.assertFalse(create('f', 'boolean'))
+ self.assertFalse(create('false', 'boolean'))
+ self.assertFalse(create('FALSE', 'boolean'))
+ self.assertFalse(create('faLse', 'boolean'))
+
+ def test_bool_equality(self):
+ self.assertEqual(create('n', 'boolean'), False)
+ self.assertNotEqual(create('n', 'boolean'), True)
+ self.assertEqual(create('y', 'boolean'), True)
+ self.assertNotEqual(create('y', 'boolean'), False)
+
+class TestList(TestCase):
+ def assertListEqual(self, value, valid, sep=None):
+ obj = create(value, 'list', separator=sep)
+ self.assertEqual(obj, valid)
+ if sep is not None:
+ self.assertEqual(obj.separator, sep)
+ self.assertEqual(str(obj), obj.separator.join(obj))
+
+ def test_list_nosep(self):
+ testlist = ['alpha', 'beta', 'theta']
+ self.assertListEqual('alpha beta theta', testlist)
+ self.assertListEqual('alpha beta\ttheta', testlist)
+ self.assertListEqual('alpha', ['alpha'])
+
+ def test_list_usersep(self):
+ self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
+ self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/poky/meta/lib/oeqa/selftest/cases/oelib/utils.py b/poky/meta/lib/oeqa/selftest/cases/oelib/utils.py
new file mode 100644
index 000000000..9fb6c1576
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oelib/utils.py
@@ -0,0 +1,51 @@
+from unittest.case import TestCase
+from oe.utils import packages_filter_out_system, trim_version
+
+class TestPackagesFilterOutSystem(TestCase):
+ def test_filter(self):
+ """
+ Test that oe.utils.packages_filter_out_system works.
+ """
+ try:
+ import bb
+ except ImportError:
+ self.skipTest("Cannot import bb")
+
+ d = bb.data_smart.DataSmart()
+ d.setVar("PN", "foo")
+
+ d.setVar("PACKAGES", "foo foo-doc foo-dev")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, [])
+
+ d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, ["foo-data"])
+
+ d.setVar("PACKAGES", "foo foo-locale-en-gb")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, [])
+
+ d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
+ pkgs = packages_filter_out_system(d)
+ self.assertEqual(pkgs, ["foo-data"])
+
+
+class TestTrimVersion(TestCase):
+ def test_version_exception(self):
+ with self.assertRaises(TypeError):
+ trim_version(None, 2)
+ with self.assertRaises(TypeError):
+ trim_version((1, 2, 3), 2)
+
+ def test_num_exception(self):
+ with self.assertRaises(ValueError):
+ trim_version("1.2.3", 0)
+ with self.assertRaises(ValueError):
+ trim_version("1.2.3", -1)
+
+ def test_valid(self):
+ self.assertEqual(trim_version("1.2.3", 1), "1")
+ self.assertEqual(trim_version("1.2.3", 2), "1.2")
+ self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
+ self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/poky/meta/lib/oeqa/selftest/cases/oescripts.py b/poky/meta/lib/oeqa/selftest/cases/oescripts.py
new file mode 100644
index 000000000..1ee753763
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/oescripts.py
@@ -0,0 +1,15 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.selftest.cases.buildhistory import BuildhistoryBase
+from oeqa.utils.commands import Command, runCmd, bitbake, get_bb_var, get_test_layer
+from oeqa.core.decorator.oeid import OETestID
+
+class BuildhistoryDiffTests(BuildhistoryBase):
+
+ @OETestID(295)
+ def test_buildhistory_diff(self):
+ target = 'xcursor-transparent-theme'
+ self.run_buildhistory_operation(target, target_config="PR = \"r1\"", change_bh_location=True)
+ self.run_buildhistory_operation(target, target_config="PR = \"r0\"", change_bh_location=False, expect_error=True)
+ result = runCmd("buildhistory-diff -p %s" % get_bb_var('BUILDHISTORY_DIR'))
+ expected_output = 'PR changed from "r1" to "r0"'
+ self.assertTrue(expected_output in result.output, msg="Did not find expected output: %s" % result.output)
diff --git a/poky/meta/lib/oeqa/selftest/cases/package.py b/poky/meta/lib/oeqa/selftest/cases/package.py
new file mode 100644
index 000000000..169698f78
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/package.py
@@ -0,0 +1,86 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.utils.commands import bitbake, get_bb_vars
+import subprocess, os
+import oe.path
+
+class VersionOrdering(OESelftestTestCase):
+ # version1, version2, sort order
+ tests = (
+ ("1.0", "1.0", 0),
+ ("1.0", "2.0", -1),
+ ("2.0", "1.0", 1),
+ ("2.0-rc", "2.0", 1),
+ ("2.0~rc", "2.0", -1),
+ ("1.2rc2", "1.2.0", -1)
+ )
+
+ @classmethod
+ def setUpClass(cls):
+ super().setUpClass()
+
+ # Build the tools we need and populate a sysroot
+ bitbake("dpkg-native opkg-native rpm-native python3-native")
+ bitbake("build-sysroots -c build_native_sysroot")
+
+ # Get the paths so we can point into the sysroot correctly
+ vars = get_bb_vars(["STAGING_DIR", "BUILD_ARCH", "bindir_native", "libdir_native"])
+ cls.staging = oe.path.join(vars["STAGING_DIR"], vars["BUILD_ARCH"])
+ cls.bindir = oe.path.join(cls.staging, vars["bindir_native"])
+ cls.libdir = oe.path.join(cls.staging, vars["libdir_native"])
+
+ def setUp(self):
+ # Just for convenience
+ self.staging = type(self).staging
+ self.bindir = type(self).bindir
+ self.libdir = type(self).libdir
+
+ @OETestID(1880)
+ def test_dpkg(self):
+ for ver1, ver2, sort in self.tests:
+ op = { -1: "<<", 0: "=", 1: ">>" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "dpkg"), "--compare-versions", ver1, op, ver2))
+ self.assertEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: ">>", 0: ">>", 1: "<<" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "dpkg"), "--compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: "=", 0: "<<", 1: "=" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "dpkg"), "--compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ @OETestID(1881)
+ def test_opkg(self):
+ for ver1, ver2, sort in self.tests:
+ op = { -1: "<<", 0: "=", 1: ">>" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "opkg"), "compare-versions", ver1, op, ver2))
+ self.assertEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: ">>", 0: ">>", 1: "<<" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "opkg"), "compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ # Now do it again but with incorrect operations
+ op = { -1: "=", 0: "<<", 1: "=" }[sort]
+ status = subprocess.call((oe.path.join(self.bindir, "opkg"), "compare-versions", ver1, op, ver2))
+ self.assertNotEqual(status, 0, "%s %s %s failed" % (ver1, op, ver2))
+
+ @OETestID(1882)
+ def test_rpm(self):
+ # Need to tell the Python bindings where to find its configuration
+ env = os.environ.copy()
+ env["RPM_CONFIGDIR"] = oe.path.join(self.libdir, "rpm")
+
+ for ver1, ver2, sort in self.tests:
+ # The only way to test rpm is via the Python module, so we need to
+ # execute python3-native. labelCompare returns -1/0/1 (like strcmp)
+ # so add 100 and use that as the exit code.
+ command = (oe.path.join(self.bindir, "python3-native", "python3"), "-c",
+ "import sys, rpm; v1=(None, \"%s\", None); v2=(None, \"%s\", None); sys.exit(rpm.labelCompare(v1, v2) + 100)" % (ver1, ver2))
+ status = subprocess.call(command, env=env)
+ self.assertIn(status, (99, 100, 101))
+ self.assertEqual(status - 100, sort, "%s %s (%d) failed" % (ver1, ver2, sort))
diff --git a/poky/meta/lib/oeqa/selftest/cases/pkgdata.py b/poky/meta/lib/oeqa/selftest/cases/pkgdata.py
new file mode 100644
index 000000000..0b4caf1b2
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/pkgdata.py
@@ -0,0 +1,224 @@
+import os
+import tempfile
+import fnmatch
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+from oeqa.core.decorator.oeid import OETestID
+
+class OePkgdataUtilTests(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(OePkgdataUtilTests, cls).setUpClass()
+ # Ensure we have the right data in pkgdata
+ cls.logger.info('Running bitbake to generate pkgdata')
+ bitbake('busybox zlib m4')
+
+ @OETestID(1203)
+ def test_lookup_pkg(self):
+ # Forward tests
+ result = runCmd('oe-pkgdata-util lookup-pkg "zlib busybox"')
+ self.assertEqual(result.output, 'libz1\nbusybox')
+ result = runCmd('oe-pkgdata-util lookup-pkg zlib-dev')
+ self.assertEqual(result.output, 'libz-dev')
+ result = runCmd('oe-pkgdata-util lookup-pkg nonexistentpkg', ignore_status=True)
+ self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
+ self.assertEqual(result.output, 'ERROR: The following packages could not be found: nonexistentpkg')
+ # Reverse tests
+ result = runCmd('oe-pkgdata-util lookup-pkg -r "libz1 busybox"')
+ self.assertEqual(result.output, 'zlib\nbusybox')
+ result = runCmd('oe-pkgdata-util lookup-pkg -r libz-dev')
+ self.assertEqual(result.output, 'zlib-dev')
+ result = runCmd('oe-pkgdata-util lookup-pkg -r nonexistentpkg', ignore_status=True)
+ self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
+ self.assertEqual(result.output, 'ERROR: The following packages could not be found: nonexistentpkg')
+
+ @OETestID(1205)
+ def test_read_value(self):
+ result = runCmd('oe-pkgdata-util read-value PN libz1')
+ self.assertEqual(result.output, 'zlib')
+ result = runCmd('oe-pkgdata-util read-value PKG libz1')
+ self.assertEqual(result.output, 'libz1')
+ result = runCmd('oe-pkgdata-util read-value PKGSIZE m4')
+ pkgsize = int(result.output.strip())
+ self.assertGreater(pkgsize, 1, "Size should be greater than 1. %s" % result.output)
+
+ @OETestID(1198)
+ def test_find_path(self):
+ result = runCmd('oe-pkgdata-util find-path /lib/libz.so.1')
+ self.assertEqual(result.output, 'zlib: /lib/libz.so.1')
+ result = runCmd('oe-pkgdata-util find-path /usr/bin/m4')
+ self.assertEqual(result.output, 'm4: /usr/bin/m4')
+ result = runCmd('oe-pkgdata-util find-path /not/exist', ignore_status=True)
+ self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
+ self.assertEqual(result.output, 'ERROR: Unable to find any package producing path /not/exist')
+
+ @OETestID(1204)
+ def test_lookup_recipe(self):
+ result = runCmd('oe-pkgdata-util lookup-recipe "libz-staticdev busybox"')
+ self.assertEqual(result.output, 'zlib\nbusybox')
+ result = runCmd('oe-pkgdata-util lookup-recipe libz-dbg')
+ self.assertEqual(result.output, 'zlib')
+ result = runCmd('oe-pkgdata-util lookup-recipe nonexistentpkg', ignore_status=True)
+ self.assertEqual(result.status, 1, "Status different than 1. output: %s" % result.output)
+ self.assertEqual(result.output, 'ERROR: The following packages could not be found: nonexistentpkg')
+
+ @OETestID(1202)
+ def test_list_pkgs(self):
+ # No arguments
+ result = runCmd('oe-pkgdata-util list-pkgs')
+ pkglist = result.output.split()
+ self.assertIn('zlib', pkglist, "Listed packages: %s" % result.output)
+ self.assertIn('zlib-dev', pkglist, "Listed packages: %s" % result.output)
+ # No pkgspec, runtime
+ result = runCmd('oe-pkgdata-util list-pkgs -r')
+ pkglist = result.output.split()
+ self.assertIn('libz-dev', pkglist, "Listed packages: %s" % result.output)
+ # With recipe specified
+ result = runCmd('oe-pkgdata-util list-pkgs -p zlib')
+ pkglist = sorted(result.output.split())
+ try:
+ pkglist.remove('zlib-ptest') # in case ptest is disabled
+ except ValueError:
+ pass
+ self.assertEqual(pkglist, ['zlib', 'zlib-dbg', 'zlib-dev', 'zlib-doc', 'zlib-staticdev'], "Packages listed after remove: %s" % result.output)
+ # With recipe specified, runtime
+ result = runCmd('oe-pkgdata-util list-pkgs -p zlib -r')
+ pkglist = sorted(result.output.split())
+ try:
+ pkglist.remove('libz-ptest') # in case ptest is disabled
+ except ValueError:
+ pass
+ self.assertEqual(pkglist, ['libz-dbg', 'libz-dev', 'libz-doc', 'libz-staticdev', 'libz1'], "Packages listed after remove: %s" % result.output)
+ # With recipe specified and unpackaged
+ result = runCmd('oe-pkgdata-util list-pkgs -p zlib -u')
+ pkglist = sorted(result.output.split())
+ self.assertIn('zlib-locale', pkglist, "Listed packages: %s" % result.output)
+ # With recipe specified and unpackaged, runtime
+ result = runCmd('oe-pkgdata-util list-pkgs -p zlib -u -r')
+ pkglist = sorted(result.output.split())
+ self.assertIn('libz-locale', pkglist, "Listed packages: %s" % result.output)
+ # With recipe specified and pkgspec
+ result = runCmd('oe-pkgdata-util list-pkgs -p zlib "*-d*"')
+ pkglist = sorted(result.output.split())
+ self.assertEqual(pkglist, ['zlib-dbg', 'zlib-dev', 'zlib-doc'], "Packages listed: %s" % result.output)
+ # With recipe specified and pkgspec, runtime
+ result = runCmd('oe-pkgdata-util list-pkgs -p zlib -r "*-d*"')
+ pkglist = sorted(result.output.split())
+ self.assertEqual(pkglist, ['libz-dbg', 'libz-dev', 'libz-doc'], "Packages listed: %s" % result.output)
+
+ @OETestID(1201)
+ def test_list_pkg_files(self):
+ def splitoutput(output):
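+ # Parse "package:" header lines and tab-indented file lines into a dict mapping package name to its file list.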
+ files = {}
+ curpkg = None
+ for line in output.splitlines():
+ if line.startswith('\t'):
+ self.assertTrue(curpkg, 'Unexpected non-package line:\n%s' % line)
+ files[curpkg].append(line.strip())
+ else:
+ self.assertTrue(line.rstrip().endswith(':'), 'Invalid package line in output:\n%s' % line)
+ curpkg = line.split(':')[0]
+ files[curpkg] = []
+ return files
+ bb_vars = get_bb_vars(['base_libdir', 'libdir', 'includedir', 'mandir'])
+ base_libdir = bb_vars['base_libdir']
+ libdir = bb_vars['libdir']
+ includedir = bb_vars['includedir']
+ mandir = bb_vars['mandir']
+ # Test recipe-space package name
+ result = runCmd('oe-pkgdata-util list-pkg-files zlib-dev zlib-doc')
+ files = splitoutput(result.output)
+ self.assertIn('zlib-dev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-doc', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn(os.path.join(includedir, 'zlib.h'), files['zlib-dev'])
+ self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['zlib-doc'])
+ # Test runtime package name
+ result = runCmd('oe-pkgdata-util list-pkg-files -r libz1 libz-dev')
+ files = splitoutput(result.output)
+ self.assertIn('libz1', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-dev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertGreater(len(files['libz1']), 1)
+ libspec = os.path.join(base_libdir, 'libz.so.1.*')
+ found = False
+ for fileitem in files['libz1']:
+ if fnmatch.fnmatchcase(fileitem, libspec):
+ found = True
+ break
+ self.assertTrue(found, 'Could not find zlib library file %s in libz1 package file list: %s' % (libspec, files['libz1']))
+ self.assertIn(os.path.join(includedir, 'zlib.h'), files['libz-dev'])
+ # Test recipe
+ result = runCmd('oe-pkgdata-util list-pkg-files -p zlib')
+ files = splitoutput(result.output)
+ self.assertIn('zlib-dbg', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-doc', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-dev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-staticdev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertNotIn('zlib-locale', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ # (ignore ptest, might not be there depending on config)
+ self.assertIn(os.path.join(includedir, 'zlib.h'), files['zlib-dev'])
+ self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['zlib-doc'])
+ self.assertIn(os.path.join(libdir, 'libz.a'), files['zlib-staticdev'])
+ # Test recipe, runtime
+ result = runCmd('oe-pkgdata-util list-pkg-files -p zlib -r')
+ files = splitoutput(result.output)
+ self.assertIn('libz-dbg', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-doc', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-dev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-staticdev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz1', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertNotIn('libz-locale', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn(os.path.join(includedir, 'zlib.h'), files['libz-dev'])
+ self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['libz-doc'])
+ self.assertIn(os.path.join(libdir, 'libz.a'), files['libz-staticdev'])
+ # Test recipe, unpackaged
+ result = runCmd('oe-pkgdata-util list-pkg-files -p zlib -u')
+ files = splitoutput(result.output)
+ self.assertIn('zlib-dbg', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-doc', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-dev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-staticdev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('zlib-locale', list(files.keys()), "listed pkgs. files: %s" % result.output) # this is the key one
+ self.assertIn(os.path.join(includedir, 'zlib.h'), files['zlib-dev'])
+ self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['zlib-doc'])
+ self.assertIn(os.path.join(libdir, 'libz.a'), files['zlib-staticdev'])
+ # Test recipe, runtime, unpackaged
+ result = runCmd('oe-pkgdata-util list-pkg-files -p zlib -r -u')
+ files = splitoutput(result.output)
+ self.assertIn('libz-dbg', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-doc', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-dev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-staticdev', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz1', list(files.keys()), "listed pkgs. files: %s" % result.output)
+ self.assertIn('libz-locale', list(files.keys()), "listed pkgs. files: %s" % result.output) # this is the key one
+ self.assertIn(os.path.join(includedir, 'zlib.h'), files['libz-dev'])
+ self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['libz-doc'])
+ self.assertIn(os.path.join(libdir, 'libz.a'), files['libz-staticdev'])
+
+ @OETestID(1200)
+ def test_glob(self):
+ tempdir = tempfile.mkdtemp(prefix='pkgdataqa')
+ self.track_for_cleanup(tempdir)
+ pkglistfile = os.path.join(tempdir, 'pkglist')
+ with open(pkglistfile, 'w') as f:
+ f.write('libz1\n')
+ f.write('busybox\n')
+ result = runCmd('oe-pkgdata-util glob %s "*-dev"' % pkglistfile)
+ desiredresult = ['libz-dev', 'busybox-dev']
+ self.assertEqual(sorted(result.output.split()), sorted(desiredresult))
+ # The following should not error (because when we use this during rootfs construction, sometimes the complementary package won't exist)
+ result = runCmd('oe-pkgdata-util glob %s "*-nonexistent"' % pkglistfile)
+ self.assertEqual(result.output, '')
+ # Test exclude option
+ result = runCmd('oe-pkgdata-util glob %s "*-dev *-dbg" -x "^libz"' % pkglistfile)
+ resultlist = result.output.split()
+ self.assertNotIn('libz-dev', resultlist)
+ self.assertNotIn('libz-dbg', resultlist)
+
+ @OETestID(1206)
+ def test_specify_pkgdatadir(self):
+ result = runCmd('oe-pkgdata-util -p %s lookup-pkg zlib' % get_bb_var('PKGDATA_DIR'))
+ self.assertEqual(result.output, 'libz1')
diff --git a/poky/meta/lib/oeqa/selftest/cases/prservice.py b/poky/meta/lib/oeqa/selftest/cases/prservice.py
new file mode 100644
index 000000000..479e52061
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/prservice.py
@@ -0,0 +1,131 @@
+import os
+import re
+import shutil
+import datetime
+
+import oeqa.utils.ftools as ftools
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.utils.network import get_free_port
+
+class BitbakePrTests(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(BitbakePrTests, cls).setUpClass()
+ cls.pkgdata_dir = get_bb_var('PKGDATA_DIR')
+
+ def get_pr_version(self, package_name):
+ package_data_file = os.path.join(self.pkgdata_dir, 'runtime', package_name)
+ package_data = ftools.read_file(package_data_file)
+ find_pr = re.search(r"PKGR: r[0-9]+\.([0-9]+)", package_data)
+ self.assertTrue(find_pr, "No PKG revision found in %s" % package_data_file)
+ return int(find_pr.group(1))
+
+ def get_task_stamp(self, package_name, recipe_task):
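+ # Find the stamp file for the given recipe task and return its signature hash, asserting exactly one exists.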
+ stampdata = get_bb_var('STAMP', target=package_name).split('/')
+ prefix = stampdata[-1]
+ package_stamps_path = "/".join(stampdata[:-1])
+ stamps = []
+ for stamp in os.listdir(package_stamps_path):
+ find_stamp = re.match(r"%s\.%s\.([a-z0-9]{32})" % (re.escape(prefix), recipe_task), stamp)
+ if find_stamp:
+ stamps.append(find_stamp.group(1))
+ self.assertFalse(len(stamps) == 0, msg="Could not find stamp for task %s for recipe %s" % (recipe_task, package_name))
+ self.assertFalse(len(stamps) > 1, msg="Found multiple %s stamps for the %s recipe in the %s directory." % (recipe_task, package_name, package_stamps_path))
+ return str(stamps[0])
+
+ def increment_package_pr(self, package_name):
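+ # Append a do_package hook whose body embeds the current timestamp, so each rebuild changes the task and bumps PKGR.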
+ inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now()
+ self.write_recipeinc(package_name, inc_data)
+ res = bitbake(package_name, ignore_status=True)
+ self.delete_recipeinc(package_name)
+ self.assertEqual(res.status, 0, msg=res.output)
+
+ def config_pr_tests(self, package_name, package_type='rpm', pr_socket='localhost:0'):
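+ # Select the package backend and point PRSERV_HOST at the test PR service socket.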
+ config_package_data = 'PACKAGE_CLASSES = "package_%s"' % package_type
+ self.write_config(config_package_data)
+ config_server_data = 'PRSERV_HOST = "%s"' % pr_socket
+ self.append_config(config_server_data)
+
+ def run_test_pr_service(self, package_name, package_type='rpm', track_task='do_package', pr_socket='localhost:0'):
+ self.config_pr_tests(package_name, package_type, pr_socket)
+
+ self.increment_package_pr(package_name)
+ pr_1 = self.get_pr_version(package_name)
+ stamp_1 = self.get_task_stamp(package_name, track_task)
+
+ self.increment_package_pr(package_name)
+ pr_2 = self.get_pr_version(package_name)
+ stamp_2 = self.get_task_stamp(package_name, track_task)
+
+ self.assertEqual(pr_2 - pr_1, 1, "Package revision should increase by exactly 1 between builds")
+ self.assertNotEqual(stamp_1, stamp_2, "Different pkg rev. but same stamp: %s" % stamp_1)
+
+ def run_test_pr_export_import(self, package_name, replace_current_db=True):
+ self.config_pr_tests(package_name)
+
+ self.increment_package_pr(package_name)
+ pr_1 = self.get_pr_version(package_name)
+
+ exported_db_path = os.path.join(self.builddir, 'export.inc')
+ export_result = runCmd("bitbake-prserv-tool export %s" % exported_db_path, ignore_status=True)
+ self.assertEqual(export_result.status, 0, msg="PR Service database export failed: %s" % export_result.output)
+ self.assertTrue(os.path.exists(exported_db_path))
+
+ if replace_current_db:
+ current_db_path = os.path.join(get_bb_var('PERSISTENT_DIR'), 'prserv.sqlite3')
+ self.assertTrue(os.path.exists(current_db_path), msg="Path to current PR Service database is invalid: %s" % current_db_path)
+ os.remove(current_db_path)
+
+ import_result = runCmd("bitbake-prserv-tool import %s" % exported_db_path, ignore_status=True)
+ os.remove(exported_db_path)
+ self.assertEqual(import_result.status, 0, msg="PR Service database import failed: %s" % import_result.output)
+
+ self.increment_package_pr(package_name)
+ pr_2 = self.get_pr_version(package_name)
+
+ self.assertEqual(pr_2 - pr_1, 1, "Package revision should increase by exactly 1 between builds")
+
+ @OETestID(930)
+ def test_import_export_replace_db(self):
+ self.run_test_pr_export_import('m4')
+
+ @OETestID(931)
+ def test_import_export_override_db(self):
+ self.run_test_pr_export_import('m4', replace_current_db=False)
+
+ @OETestID(932)
+ def test_pr_service_rpm_arch_dep(self):
+ self.run_test_pr_service('m4', 'rpm', 'do_package')
+
+ @OETestID(934)
+ def test_pr_service_deb_arch_dep(self):
+ self.run_test_pr_service('m4', 'deb', 'do_package')
+
+ @OETestID(933)
+ def test_pr_service_ipk_arch_dep(self):
+ self.run_test_pr_service('m4', 'ipk', 'do_package')
+
+ @OETestID(935)
+ def test_pr_service_rpm_arch_indep(self):
+ self.run_test_pr_service('xcursor-transparent-theme', 'rpm', 'do_package')
+
+ @OETestID(937)
+ def test_pr_service_deb_arch_indep(self):
+ self.run_test_pr_service('xcursor-transparent-theme', 'deb', 'do_package')
+
+ @OETestID(936)
+ def test_pr_service_ipk_arch_indep(self):
+ self.run_test_pr_service('xcursor-transparent-theme', 'ipk', 'do_package')
+
+ @OETestID(1419)
+ def test_stopping_prservice_message(self):
+ port = get_free_port()
+
+ runCmd('bitbake-prserv --host localhost --port %s --loglevel=DEBUG --start' % port)
+ ret = runCmd('bitbake-prserv --host localhost --port %s --loglevel=DEBUG --stop' % port)
+
+ self.assertEqual(ret.status, 0)
+
diff --git a/poky/meta/lib/oeqa/selftest/cases/recipetool.py b/poky/meta/lib/oeqa/selftest/cases/recipetool.py
new file mode 100644
index 000000000..754ea9498
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/recipetool.py
@@ -0,0 +1,698 @@
+import os
+import shutil
+import tempfile
+import urllib.parse
+
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var
+from oeqa.utils.commands import get_bb_vars, create_temp_layer
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.selftest.cases import devtool
+
+templayerdir = None
+
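+# The recipetool tests share one temporary layer, created here and removed again in tearDownModule.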
+def setUpModule():
+ global templayerdir
+ templayerdir = tempfile.mkdtemp(prefix='recipetoolqa')
+ create_temp_layer(templayerdir, 'selftestrecipetool')
+ runCmd('bitbake-layers add-layer %s' % templayerdir)
+
+
+def tearDownModule():
+ runCmd('bitbake-layers remove-layer %s' % templayerdir, ignore_status=True)
+ runCmd('rm -rf %s' % templayerdir)
+
+
+class RecipetoolBase(devtool.DevtoolBase):
+
+ def setUpLocal(self):
+ super(RecipetoolBase, self).setUpLocal()
+ self.templayerdir = templayerdir
+ self.tempdir = tempfile.mkdtemp(prefix='recipetoolqa')
+ self.track_for_cleanup(self.tempdir)
+ self.testfile = os.path.join(self.tempdir, 'testfile')
+ with open(self.testfile, 'w') as f:
+ f.write('Test file\n')
+
+ def tearDownLocal(self):
+ runCmd('rm -rf %s/recipes-*' % self.templayerdir)
+ super(RecipetoolBase, self).tearDownLocal()
+
+ def _try_recipetool_appendcmd(self, cmd, testrecipe, expectedfiles, expectedlines=None):
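+ # Run the recipetool command, then verify the bbappend it creates, its contents, and the files copied alongside it.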
+ result = runCmd(cmd)
+ self.assertNotIn('Traceback', result.output)
+
+ # Check the bbappend was created and applies properly
+ recipefile = get_bb_var('FILE', testrecipe)
+ bbappendfile = self._check_bbappend(testrecipe, recipefile, self.templayerdir)
+
+ # Check the bbappend contents
+ if expectedlines is not None:
+ with open(bbappendfile, 'r') as f:
+ self.assertEqual(expectedlines, f.readlines(), "Expected lines are not present in %s" % bbappendfile)
+
+ # Check file was copied
+ filesdir = os.path.join(os.path.dirname(bbappendfile), testrecipe)
+ for expectedfile in expectedfiles:
+ self.assertTrue(os.path.isfile(os.path.join(filesdir, expectedfile)), 'Expected file %s to be copied next to bbappend, but it wasn\'t' % expectedfile)
+
+ # Check no other files created
+ createdfiles = []
+ for root, _, files in os.walk(filesdir):
+ for f in files:
+ createdfiles.append(os.path.relpath(os.path.join(root, f), filesdir))
+ self.assertEqual(sorted(createdfiles), sorted(expectedfiles))
+
+ return bbappendfile, result.output
+
+
+class RecipetoolTests(RecipetoolBase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(RecipetoolTests, cls).setUpClass()
+ # Ensure we have the right data in shlibs/pkgdata
+ cls.logger.info('Running bitbake to generate pkgdata')
+ bitbake('-c packagedata base-files coreutils busybox selftest-recipetool-appendfile')
+ bb_vars = get_bb_vars(['COREBASE', 'BBPATH'])
+ cls.corebase = bb_vars['COREBASE']
+ cls.bbpath = bb_vars['BBPATH']
+
+ def _try_recipetool_appendfile(self, testrecipe, destfile, newfile, options, expectedlines, expectedfiles):
+ cmd = 'recipetool appendfile %s %s %s %s' % (self.templayerdir, destfile, newfile, options)
+ return self._try_recipetool_appendcmd(cmd, testrecipe, expectedfiles, expectedlines)
+
+ def _try_recipetool_appendfile_fail(self, destfile, newfile, checkerror):
+ cmd = 'recipetool appendfile %s %s %s' % (self.templayerdir, destfile, newfile)
+ result = runCmd(cmd, ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'Command "%s" should have failed but didn\'t' % cmd)
+ self.assertNotIn('Traceback', result.output)
+ for errorstr in checkerror:
+ self.assertIn(errorstr, result.output)
+
+ @OETestID(1177)
+ def test_recipetool_appendfile_basic(self):
+ # Basic test
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ _, output = self._try_recipetool_appendfile('base-files', '/etc/motd', self.testfile, '', expectedlines, ['motd'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1183)
+ def test_recipetool_appendfile_invalid(self):
+ # Test some commands that should error
+ self._try_recipetool_appendfile_fail('/etc/passwd', self.testfile, ['ERROR: /etc/passwd cannot be handled by this tool', 'useradd', 'extrausers'])
+ self._try_recipetool_appendfile_fail('/etc/timestamp', self.testfile, ['ERROR: /etc/timestamp cannot be handled by this tool'])
+ self._try_recipetool_appendfile_fail('/dev/console', self.testfile, ['ERROR: /dev/console cannot be handled by this tool'])
+
+ @OETestID(1176)
+ def test_recipetool_appendfile_alternatives(self):
+ # Now try with a file we know should be an alternative
+ # (this is very much a fake example, but one we know is reliably an alternative)
+ self._try_recipetool_appendfile_fail('/bin/ls', self.testfile, ['ERROR: File /bin/ls is an alternative possibly provided by the following recipes:', 'coreutils', 'busybox'])
+ # Need a test file - should be executable
+ testfile2 = os.path.join(self.corebase, 'oe-init-build-env')
+ testfile2name = os.path.basename(testfile2)
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://%s"\n' % testfile2name,
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${base_bindir}\n',
+ ' install -m 0755 ${WORKDIR}/%s ${D}${base_bindir}/ls\n' % testfile2name,
+ '}\n']
+ self._try_recipetool_appendfile('coreutils', '/bin/ls', testfile2, '-r coreutils', expectedlines, [testfile2name])
+ # Now try bbappending the same file again, contents should not change
+ bbappendfile, _ = self._try_recipetool_appendfile('coreutils', '/bin/ls', self.testfile, '-r coreutils', expectedlines, [testfile2name])
+ # But the copied file itself should have been updated with the new contents
+ copiedfile = os.path.join(os.path.dirname(bbappendfile), 'coreutils', testfile2name)
+ result = runCmd('diff -q %s %s' % (testfile2, copiedfile), ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'New file should have been copied but was not: %s' % result.output)
+
+ @OETestID(1178)
+ def test_recipetool_appendfile_binary(self):
+ # Try appending a binary file
+ # /bin/ls can be a symlink to /usr/bin/ls
+ ls = os.path.realpath("/bin/ls")
+ result = runCmd('recipetool appendfile %s /bin/ls %s -r coreutils' % (self.templayerdir, ls))
+ self.assertIn('WARNING: ', result.output)
+ self.assertIn('is a binary', result.output)
+
+ @OETestID(1173)
+ def test_recipetool_appendfile_add(self):
+ # Try arbitrary file add to a recipe
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile"\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${datadir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/something\n',
+ '}\n']
+ self._try_recipetool_appendfile('netbase', '/usr/share/something', self.testfile, '-r netbase', expectedlines, ['testfile'])
+ # Try adding another file, this time where the source file is executable
+ # (so we're testing that, plus modifying an existing bbappend)
+ testfile2 = os.path.join(self.corebase, 'oe-init-build-env')
+ testfile2name = os.path.basename(testfile2)
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile \\\n',
+ ' file://%s \\\n' % testfile2name,
+ ' "\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${datadir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/something\n',
+ ' install -m 0755 ${WORKDIR}/%s ${D}${datadir}/scriptname\n' % testfile2name,
+ '}\n']
+ self._try_recipetool_appendfile('netbase', '/usr/share/scriptname', testfile2, '-r netbase', expectedlines, ['testfile', testfile2name])
+
+ @OETestID(1174)
+ def test_recipetool_appendfile_add_bindir(self):
+ # Try arbitrary file add to a recipe, this time to a location where it should be installed as executable
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile"\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${bindir}\n',
+ ' install -m 0755 ${WORKDIR}/testfile ${D}${bindir}/selftest-recipetool-testbin\n',
+ '}\n']
+ _, output = self._try_recipetool_appendfile('netbase', '/usr/bin/selftest-recipetool-testbin', self.testfile, '-r netbase', expectedlines, ['testfile'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1175)
+ def test_recipetool_appendfile_add_machine(self):
+ # Try arbitrary file add to a recipe, this time with a machine-specific override so the append applies to one machine only
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'PACKAGE_ARCH = "${MACHINE_ARCH}"\n',
+ '\n',
+ 'SRC_URI_append_mymachine = " file://testfile"\n',
+ '\n',
+ 'do_install_append_mymachine() {\n',
+ ' install -d ${D}${datadir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/something\n',
+ '}\n']
+ _, output = self._try_recipetool_appendfile('netbase', '/usr/share/something', self.testfile, '-r netbase -m mymachine', expectedlines, ['mymachine/testfile'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1184)
+ def test_recipetool_appendfile_orig(self):
+ # A file that's in SRC_URI and in do_install with the same name
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-orig', self.testfile, '', expectedlines, ['selftest-replaceme-orig'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1191)
+ def test_recipetool_appendfile_todir(self):
+ # A file that's in SRC_URI and in do_install with destination directory rather than file
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-todir', self.testfile, '', expectedlines, ['selftest-replaceme-todir'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1187)
+ def test_recipetool_appendfile_renamed(self):
+ # A file that's in SRC_URI with a different name to the destination file
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-renamed', self.testfile, '', expectedlines, ['file1'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1190)
+ def test_recipetool_appendfile_subdir(self):
+ # A file that's in SRC_URI in a subdir
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile"\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${datadir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-subdir\n',
+ '}\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-subdir', self.testfile, '', expectedlines, ['testfile'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1189)
+ def test_recipetool_appendfile_src_glob(self):
+ # A file that's in SRC_URI as a glob
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile"\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${datadir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-src-globfile\n',
+ '}\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-src-globfile', self.testfile, '', expectedlines, ['testfile'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1181)
+ def test_recipetool_appendfile_inst_glob(self):
+ # A file that's in do_install as a glob
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-globfile', self.testfile, '', expectedlines, ['selftest-replaceme-inst-globfile'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1182)
+ def test_recipetool_appendfile_inst_todir_glob(self):
+ # A file that's in do_install as a glob with destination as a directory
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-todir-globfile', self.testfile, '', expectedlines, ['selftest-replaceme-inst-todir-globfile'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1185)
+ def test_recipetool_appendfile_patch(self):
+ # A file that's added by a patch in SRC_URI
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile"\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${sysconfdir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${sysconfdir}/selftest-replaceme-patched\n',
+ '}\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/etc/selftest-replaceme-patched', self.testfile, '', expectedlines, ['testfile'])
+ for line in output.splitlines():
+ if 'WARNING: ' in line:
+ self.assertIn('add-file.patch', line, 'Unexpected warning found in output:\n%s' % line)
+ break
+ else:
+ self.fail('Patch warning not found in output:\n%s' % output)
+
+ @OETestID(1188)
+ def test_recipetool_appendfile_script(self):
+ # Now, a file that's in SRC_URI but installed by a script (so no mention in do_install)
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile"\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${datadir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-scripted\n',
+ '}\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-scripted', self.testfile, '', expectedlines, ['testfile'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1180)
+ def test_recipetool_appendfile_inst_func(self):
+ # A file that's installed from a function called by do_install
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-inst-func', self.testfile, '', expectedlines, ['selftest-replaceme-inst-func'])
+ self.assertNotIn('WARNING: ', output)
+
+ @OETestID(1186)
+ def test_recipetool_appendfile_postinstall(self):
+ # A file that's created by a postinstall script (and explicitly mentioned in it)
+ # First try without specifying recipe
+ self._try_recipetool_appendfile_fail('/usr/share/selftest-replaceme-postinst', self.testfile, ['File /usr/share/selftest-replaceme-postinst may be written out in a pre/postinstall script of the following recipes:', 'selftest-recipetool-appendfile'])
+ # Now specify recipe
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n',
+ 'SRC_URI += "file://testfile"\n',
+ '\n',
+ 'do_install_append() {\n',
+ ' install -d ${D}${datadir}\n',
+ ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-postinst\n',
+ '}\n']
+ _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-postinst', self.testfile, '-r selftest-recipetool-appendfile', expectedlines, ['testfile'])
+
+ @OETestID(1179)
+ def test_recipetool_appendfile_extlayer(self):
+ # Try creating a bbappend in a layer that's not in bblayers.conf and has a different structure
+ exttemplayerdir = os.path.join(self.tempdir, 'extlayer')
+ self._create_temp_layer(exttemplayerdir, False, 'oeselftestextlayer', recipepathspec='metadata/recipes/recipes-*/*')
+ result = runCmd('recipetool appendfile %s /usr/share/selftest-replaceme-orig %s' % (exttemplayerdir, self.testfile))
+ self.assertNotIn('Traceback', result.output)
+ createdfiles = []
+ for root, _, files in os.walk(exttemplayerdir):
+ for f in files:
+ createdfiles.append(os.path.relpath(os.path.join(root, f), exttemplayerdir))
+ createdfiles.remove('conf/layer.conf')
+ expectedfiles = ['metadata/recipes/recipes-test/selftest-recipetool-appendfile/selftest-recipetool-appendfile.bbappend',
+ 'metadata/recipes/recipes-test/selftest-recipetool-appendfile/selftest-recipetool-appendfile/selftest-replaceme-orig']
+ self.assertEqual(sorted(createdfiles), sorted(expectedfiles))
+
+ @OETestID(1192)
+ def test_recipetool_appendfile_wildcard(self):
+
+ def try_appendfile_wc(options):
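+ # Run recipetool appendfile with the given options and return the name of the bbappend it creates.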
+ result = runCmd('recipetool appendfile %s /etc/profile %s %s' % (self.templayerdir, self.testfile, options))
+ self.assertNotIn('Traceback', result.output)
+ bbappendfile = None
+ for root, _, files in os.walk(self.templayerdir):
+ for f in files:
+ if f.endswith('.bbappend'):
+ bbappendfile = f
+ break
+ if not bbappendfile:
+ self.fail('No bbappend file created')
+ runCmd('rm -rf %s/recipes-*' % self.templayerdir)
+ return bbappendfile
+
+ # Check without wildcard option
+ recipefn = os.path.basename(get_bb_var('FILE', 'base-files'))
+ filename = try_appendfile_wc('')
+ self.assertEqual(filename, recipefn.replace('.bb', '.bbappend'))
+ # Now check with wildcard option
+ filename = try_appendfile_wc('-w')
+ self.assertEqual(filename, recipefn.split('_')[0] + '_%.bbappend')
+
+ @OETestID(1193)
+ def test_recipetool_create(self):
+ # Try adding a recipe
+ tempsrc = os.path.join(self.tempdir, 'srctree')
+ os.makedirs(tempsrc)
+ recipefile = os.path.join(self.tempdir, 'logrotate_3.12.3.bb')
+ srcuri = 'https://github.com/logrotate/logrotate/releases/download/3.12.3/logrotate-3.12.3.tar.xz'
+ result = runCmd('recipetool create -o %s %s -x %s' % (recipefile, srcuri, tempsrc))
+ self.assertTrue(os.path.isfile(recipefile))
+ checkvars = {}
+ checkvars['LICENSE'] = 'GPLv2'
+ checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'
+ checkvars['SRC_URI'] = 'https://github.com/logrotate/logrotate/releases/download/${PV}/logrotate-${PV}.tar.xz'
+ checkvars['SRC_URI[md5sum]'] = 'a560c57fac87c45b2fc17406cdf79288'
+ checkvars['SRC_URI[sha256sum]'] = '2e6a401cac9024db2288297e3be1a8ab60e7401ba8e91225218aaf4a27e82a07'
+ self._test_recipe_contents(recipefile, checkvars, [])
+
+ @OETestID(1194)
+ def test_recipetool_create_git(self):
+ if 'x11' not in get_bb_var('DISTRO_FEATURES'):
+ self.skipTest('Test requires x11 as distro feature')
+ # Ensure we have the right data in shlibs/pkgdata
+ bitbake('libpng pango libx11 libxext jpeg libcheck')
+ # Try adding a recipe
+ tempsrc = os.path.join(self.tempdir, 'srctree')
+ os.makedirs(tempsrc)
+ recipefile = os.path.join(self.tempdir, 'libmatchbox.bb')
+ srcuri = 'git://git.yoctoproject.org/libmatchbox'
+ result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri + ";rev=9f7cf8895ae2d39c465c04cc78e918c157420269", '-x', tempsrc])
+ self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
+ checkvars = {}
+ checkvars['LICENSE'] = 'LGPLv2.1'
+ checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=7fbc338309ac38fefcd64b04bb903e34'
+ checkvars['S'] = '${WORKDIR}/git'
+ checkvars['PV'] = '1.11+git${SRCPV}'
+ checkvars['SRC_URI'] = srcuri
+ checkvars['DEPENDS'] = set(['libcheck', 'libjpeg-turbo', 'libpng', 'libx11', 'libxext', 'pango'])
+ inherits = ['autotools', 'pkgconfig']
+ self._test_recipe_contents(recipefile, checkvars, inherits)
+
+ @OETestID(1392)
+ def test_recipetool_create_simple(self):
+ # Try adding a recipe
+ temprecipe = os.path.join(self.tempdir, 'recipe')
+ os.makedirs(temprecipe)
+ pv = '1.7.3.0'
+ srcuri = 'http://www.dest-unreach.org/socat/download/socat-%s.tar.bz2' % pv
+ result = runCmd('recipetool create %s -o %s' % (srcuri, temprecipe))
+ dirlist = os.listdir(temprecipe)
+ if len(dirlist) > 1:
+ self.fail('recipetool created more than just one file; output:\n%s\ndirlist:\n%s' % (result.output, str(dirlist)))
+ if len(dirlist) < 1 or not os.path.isfile(os.path.join(temprecipe, dirlist[0])):
+ self.fail('recipetool did not create recipe file; output:\n%s\ndirlist:\n%s' % (result.output, str(dirlist)))
+ self.assertEqual(dirlist[0], 'socat_%s.bb' % pv, 'Recipe file incorrectly named')
+ checkvars = {}
+ checkvars['LICENSE'] = set(['Unknown', 'GPLv2'])
+ checkvars['LIC_FILES_CHKSUM'] = set(['file://COPYING.OpenSSL;md5=5c9bccc77f67a8328ef4ebaf468116f4', 'file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263'])
+ # We don't check DEPENDS since they are variable for this recipe depending on what's in the sysroot
+ checkvars['S'] = None
+ checkvars['SRC_URI'] = srcuri.replace(pv, '${PV}')
+ inherits = ['autotools']
+ self._test_recipe_contents(os.path.join(temprecipe, dirlist[0]), checkvars, inherits)
+
+ @OETestID(1418)
+ def test_recipetool_create_cmake(self):
+ # Try adding a recipe
+ temprecipe = os.path.join(self.tempdir, 'recipe')
+ os.makedirs(temprecipe)
+ recipefile = os.path.join(temprecipe, 'navit_0.5.0.bb')
+ srcuri = 'http://downloads.sourceforge.net/project/navit/v0.5.0/navit-0.5.0.tar.gz'
+ result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
+ self.assertTrue(os.path.isfile(recipefile))
+ checkvars = {}
+ checkvars['LICENSE'] = set(['Unknown', 'GPLv2', 'LGPLv2'])
+ checkvars['SRC_URI'] = 'http://downloads.sourceforge.net/project/navit/v${PV}/navit-${PV}.tar.gz'
+ checkvars['SRC_URI[md5sum]'] = '242f398e979a6b8c0f3c802b63435b68'
+ checkvars['SRC_URI[sha256sum]'] = '13353481d7fc01a4f64e385dda460b51496366bba0fd2cc85a89a0747910e94d'
+ checkvars['DEPENDS'] = set(['freetype', 'zlib', 'openssl', 'glib-2.0', 'virtual/libgl', 'virtual/egl', 'gtk+', 'libpng', 'libsdl', 'freeglut', 'dbus-glib'])
+ inherits = ['cmake', 'python-dir', 'gettext', 'pkgconfig']
+ self._test_recipe_contents(recipefile, checkvars, inherits)
+
+ @OETestID(1638)
+ def test_recipetool_create_github(self):
+ # Basic test to see if github URL mangling works
+ temprecipe = os.path.join(self.tempdir, 'recipe')
+ os.makedirs(temprecipe)
+ recipefile = os.path.join(temprecipe, 'meson_git.bb')
+ srcuri = 'https://github.com/mesonbuild/meson;rev=0.32.0'
+ result = runCmd(['recipetool', 'create', '-o', temprecipe, srcuri])
+ self.assertTrue(os.path.isfile(recipefile))
+ checkvars = {}
+ checkvars['LICENSE'] = set(['Apache-2.0'])
+ checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https'
+ inherits = ['setuptools']
+ self._test_recipe_contents(recipefile, checkvars, inherits)
+
+ @OETestID(1639)
+ def test_recipetool_create_github_tarball(self):
+ # Basic test to ensure github URL mangling doesn't apply to release tarballs
+ temprecipe = os.path.join(self.tempdir, 'recipe')
+ os.makedirs(temprecipe)
+ pv = '0.32.0'
+ recipefile = os.path.join(temprecipe, 'meson_%s.bb' % pv)
+ srcuri = 'https://github.com/mesonbuild/meson/releases/download/%s/meson-%s.tar.gz' % (pv, pv)
+ result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
+ self.assertTrue(os.path.isfile(recipefile))
+ checkvars = {}
+ checkvars['LICENSE'] = set(['Apache-2.0'])
+ checkvars['SRC_URI'] = 'https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${PV}.tar.gz'
+ inherits = ['setuptools']
+ self._test_recipe_contents(recipefile, checkvars, inherits)
+
+ @OETestID(1637)
+ def test_recipetool_create_git_http(self):
+ # Basic test to check http git URL mangling works
+ temprecipe = os.path.join(self.tempdir, 'recipe')
+ os.makedirs(temprecipe)
+ recipefile = os.path.join(temprecipe, 'matchbox-terminal_git.bb')
+ srcuri = 'http://git.yoctoproject.org/git/matchbox-terminal'
+ result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
+ self.assertTrue(os.path.isfile(recipefile))
+ checkvars = {}
+ checkvars['LICENSE'] = set(['GPLv2'])
+ checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/matchbox-terminal;protocol=http'
+ inherits = ['pkgconfig', 'autotools']
+ self._test_recipe_contents(recipefile, checkvars, inherits)
+
+ def _copy_file_with_cleanup(self, srcfile, basedstdir, *paths):
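+ # Copy srcfile into basedstdir joined with *paths, registering any created directories and the copied file for cleanup.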
+ dstdir = basedstdir
+ self.assertTrue(os.path.exists(dstdir))
+ for p in paths:
+ dstdir = os.path.join(dstdir, p)
+ if not os.path.exists(dstdir):
+ os.makedirs(dstdir)
+ self.track_for_cleanup(dstdir)
+ dstfile = os.path.join(dstdir, os.path.basename(srcfile))
+ if srcfile != dstfile:
+ shutil.copy(srcfile, dstfile)
+ self.track_for_cleanup(dstfile)
+
+ @OETestID(1640)
+ def test_recipetool_load_plugin(self):
+ """Test that recipetool loads only the first found plugin in BBPATH."""
+
+ recipetool = runCmd("which recipetool")
+ fromname = runCmd("recipetool --quiet pluginfile")
+ srcfile = fromname.output
+ searchpath = self.bbpath.split(':') + [os.path.dirname(recipetool.output)]
+ plugincontent = []
+ with open(srcfile) as fh:
+ plugincontent = fh.readlines()
+ try:
+ self.assertIn('meta-selftest', srcfile, 'wrong bbpath plugin found')
+ for path in searchpath:
+ self._copy_file_with_cleanup(srcfile, path, 'lib', 'recipetool')
+ result = runCmd("recipetool --quiet count")
+ self.assertEqual(result.output, '1')
+ result = runCmd("recipetool --quiet multiloaded")
+ self.assertEqual(result.output, "no")
+ for path in searchpath:
+ result = runCmd("recipetool --quiet bbdir")
+ self.assertEqual(result.output, path)
+ os.unlink(os.path.join(result.output, 'lib', 'recipetool', 'bbpath.py'))
+ finally:
+ with open(srcfile, 'w') as fh:
+ fh.writelines(plugincontent)
+
+
+class RecipetoolAppendsrcBase(RecipetoolBase):
+ def _try_recipetool_appendsrcfile(self, testrecipe, newfile, destfile, options, expectedlines, expectedfiles):
+ cmd = 'recipetool appendsrcfile %s %s %s %s %s' % (options, self.templayerdir, testrecipe, newfile, destfile)
+ return self._try_recipetool_appendcmd(cmd, testrecipe, expectedfiles, expectedlines)
+
+ def _try_recipetool_appendsrcfiles(self, testrecipe, newfiles, expectedlines=None, expectedfiles=None, destdir=None, options=''):
+
+ if destdir:
+ options += ' -D %s' % destdir
+
+ if expectedfiles is None:
+ expectedfiles = [os.path.basename(f) for f in newfiles]
+
+ cmd = 'recipetool appendsrcfiles %s %s %s %s' % (options, self.templayerdir, testrecipe, ' '.join(newfiles))
+ return self._try_recipetool_appendcmd(cmd, testrecipe, expectedfiles, expectedlines)
+
+ def _try_recipetool_appendsrcfile_fail(self, testrecipe, newfile, destfile, checkerror):
+ cmd = 'recipetool appendsrcfile %s %s %s %s' % (self.templayerdir, testrecipe, newfile, destfile or '')
+ result = runCmd(cmd, ignore_status=True)
+ self.assertNotEqual(result.status, 0, 'Command "%s" should have failed but didn\'t' % cmd)
+ self.assertNotIn('Traceback', result.output)
+ for errorstr in checkerror:
+ self.assertIn(errorstr, result.output)
+
+ @staticmethod
+ def _get_first_file_uri(recipe):
+ '''Return the first file:// in SRC_URI for the specified recipe.'''
+ src_uri = get_bb_var('SRC_URI', recipe).split()
+ for uri in src_uri:
+ p = urllib.parse.urlparse(uri)
+ if p.scheme == 'file':
+ return p.netloc + p.path
+
+ def _test_appendsrcfile(self, testrecipe, filename=None, destdir=None, has_src_uri=True, srcdir=None, newfile=None, options=''):
+ if newfile is None:
+ newfile = self.testfile
+
+ if srcdir:
+ if destdir:
+ expected_subdir = os.path.join(srcdir, destdir)
+ else:
+ expected_subdir = srcdir
+ else:
+ options += " -W"
+ expected_subdir = destdir
+
+ if filename:
+ if destdir:
+ destpath = os.path.join(destdir, filename)
+ else:
+ destpath = filename
+ else:
+ filename = os.path.basename(newfile)
+ if destdir:
+ destpath = destdir + os.sep
+ else:
+ destpath = '.' + os.sep
+
+ expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n',
+ '\n']
+ if has_src_uri:
+ uri = 'file://%s' % filename
+ if expected_subdir:
+ uri += ';subdir=%s' % expected_subdir
+ expectedlines[0:0] = ['SRC_URI += "%s"\n' % uri,
+ '\n']
+
+ return self._try_recipetool_appendsrcfile(testrecipe, newfile, destpath, options, expectedlines, [filename])
+
+ def _test_appendsrcfiles(self, testrecipe, newfiles, expectedfiles=None, destdir=None, options=''):
+ if expectedfiles is None:
+ expectedfiles = [os.path.basename(n) for n in newfiles]
+
+ self._try_recipetool_appendsrcfiles(testrecipe, newfiles, expectedfiles=expectedfiles, destdir=destdir, options=options)
+
+ bb_vars = get_bb_vars(['SRC_URI', 'FILE', 'FILESEXTRAPATHS'], testrecipe)
+ src_uri = bb_vars['SRC_URI'].split()
+ for f in expectedfiles:
+ if destdir:
+ self.assertIn('file://%s;subdir=%s' % (f, destdir), src_uri)
+ else:
+ self.assertIn('file://%s' % f, src_uri)
+
+ recipefile = bb_vars['FILE']
+ bbappendfile = self._check_bbappend(testrecipe, recipefile, self.templayerdir)
+ filesdir = os.path.join(os.path.dirname(bbappendfile), testrecipe)
+ filesextrapaths = bb_vars['FILESEXTRAPATHS'].split(':')
+ self.assertIn(filesdir, filesextrapaths)
+
+
+
+class RecipetoolAppendsrcTests(RecipetoolAppendsrcBase):
+
+ @OETestID(1273)
+ def test_recipetool_appendsrcfile_basic(self):
+ self._test_appendsrcfile('base-files', 'a-file')
+
+ @OETestID(1274)
+ def test_recipetool_appendsrcfile_basic_wildcard(self):
+ testrecipe = 'base-files'
+ self._test_appendsrcfile(testrecipe, 'a-file', options='-w')
+ recipefile = get_bb_var('FILE', testrecipe)
+ bbappendfile = self._check_bbappend(testrecipe, recipefile, self.templayerdir)
+ self.assertEqual(os.path.basename(bbappendfile), '%s_%%.bbappend' % testrecipe)
+
+ @OETestID(1281)
+ def test_recipetool_appendsrcfile_subdir_basic(self):
+ self._test_appendsrcfile('base-files', 'a-file', 'tmp')
+
+ @OETestID(1282)
+ def test_recipetool_appendsrcfile_subdir_basic_dirdest(self):
+ self._test_appendsrcfile('base-files', destdir='tmp')
+
+ @OETestID(1280)
+ def test_recipetool_appendsrcfile_srcdir_basic(self):
+ testrecipe = 'bash'
+ bb_vars = get_bb_vars(['S', 'WORKDIR'], testrecipe)
+ srcdir = bb_vars['S']
+ workdir = bb_vars['WORKDIR']
+ subdir = os.path.relpath(srcdir, workdir)
+ self._test_appendsrcfile(testrecipe, 'a-file', srcdir=subdir)
+
+ @OETestID(1275)
+ def test_recipetool_appendsrcfile_existing_in_src_uri(self):
+ testrecipe = 'base-files'
+ filepath = self._get_first_file_uri(testrecipe)
+ self.assertTrue(filepath, 'Unable to test, no file:// uri found in SRC_URI for %s' % testrecipe)
+ self._test_appendsrcfile(testrecipe, filepath, has_src_uri=False)
+
+ @OETestID(1276)
+ def test_recipetool_appendsrcfile_existing_in_src_uri_diff_params(self):
+ testrecipe = 'base-files'
+ subdir = 'tmp'
+ filepath = self._get_first_file_uri(testrecipe)
+ self.assertTrue(filepath, 'Unable to test, no file:// uri found in SRC_URI for %s' % testrecipe)
+
+ _, output = self._test_appendsrcfile(testrecipe, filepath, subdir, has_src_uri=False)
+ self.assertIn('with different parameters', output)
+
+ @OETestID(1277)
+ def test_recipetool_appendsrcfile_replace_file_srcdir(self):
+ testrecipe = 'bash'
+ filepath = 'Makefile.in'
+ bb_vars = get_bb_vars(['S', 'WORKDIR'], testrecipe)
+ srcdir = bb_vars['S']
+ workdir = bb_vars['WORKDIR']
+ subdir = os.path.relpath(srcdir, workdir)
+
+ self._test_appendsrcfile(testrecipe, filepath, srcdir=subdir)
+ bitbake('%s:do_unpack' % testrecipe)
+ with open(self.testfile, 'r') as f1, open(os.path.join(srcdir, filepath), 'r') as f2:
+ self.assertEqual(f1.read(), f2.read())
+
+ @OETestID(1278)
+ def test_recipetool_appendsrcfiles_basic(self, destdir=None):
+ newfiles = [self.testfile]
+ for i in range(1, 5):
+ testfile = os.path.join(self.tempdir, 'testfile%d' % i)
+ with open(testfile, 'w') as f:
+ f.write('Test file %d\n' % i)
+ newfiles.append(testfile)
+ self._test_appendsrcfiles('gcc', newfiles, destdir=destdir, options='-W')
+
+ @OETestID(1279)
+ def test_recipetool_appendsrcfiles_basic_subdir(self):
+ self.test_recipetool_appendsrcfiles_basic(destdir='testdir')
diff --git a/poky/meta/lib/oeqa/selftest/cases/runcmd.py b/poky/meta/lib/oeqa/selftest/cases/runcmd.py
new file mode 100644
index 000000000..d76d7063c
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/runcmd.py
@@ -0,0 +1,134 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd
+from oeqa.utils import CommandError
+from oeqa.core.decorator.oeid import OETestID
+
+import subprocess
+import threading
+import time
+import signal
+
+class MemLogger(object):
+ def __init__(self):
+ self.info_msgs = []
+ self.error_msgs = []
+
+ def info(self, msg):
+ self.info_msgs.append(msg)
+
+ def error(self, msg):
+ self.error_msgs.append(msg)
+
+class RunCmdTests(OESelftestTestCase):
+ """ Basic tests for runCmd() utility function """
+
+ # The delta is intentionally smaller than the timeout, to detect cases where
+ # we incorrectly apply the timeout more than once.
+ TIMEOUT = 2
+ DELTA = 1
+
+ @OETestID(1916)
+ def test_result_okay(self):
+ result = runCmd("true")
+ self.assertEqual(result.status, 0)
+
+ @OETestID(1915)
+ def test_result_false(self):
+ result = runCmd("false", ignore_status=True)
+ self.assertEqual(result.status, 1)
+
+ @OETestID(1917)
+ def test_shell(self):
+ # A shell is used for all string commands.
+ result = runCmd("false; true", ignore_status=True)
+ self.assertEqual(result.status, 0)
+
+ @OETestID(1910)
+ def test_no_shell(self):
+ self.assertRaises(FileNotFoundError,
+ runCmd, "false; true", shell=False)
+
+ @OETestID(1906)
+ def test_list_not_found(self):
+ self.assertRaises(FileNotFoundError,
+ runCmd, ["false; true"])
+
+ @OETestID(1907)
+ def test_list_okay(self):
+ result = runCmd(["true"])
+ self.assertEqual(result.status, 0)
+
+ @OETestID(1913)
+ def test_result_assertion(self):
+        self.assertRaisesRegex(AssertionError, "Command 'echo .* false' returned non-zero exit status 1:\nfoobar",
+            runCmd, "echo foobar >&2; false", shell=True)
+
+ @OETestID(1914)
+ def test_result_exception(self):
+        self.assertRaisesRegex(CommandError, "Command 'echo .* false' returned non-zero exit status 1 with output: foobar",
+            runCmd, "echo foobar >&2; false", shell=True, assert_error=False)
+
+ @OETestID(1911)
+ def test_output(self):
+ result = runCmd("echo stdout; echo stderr >&2", shell=True)
+ self.assertEqual("stdout\nstderr", result.output)
+ self.assertEqual("", result.error)
+
+ @OETestID(1912)
+ def test_output_split(self):
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, stderr=subprocess.PIPE)
+ self.assertEqual("stdout", result.output)
+ self.assertEqual("stderr", result.error)
+
+ @OETestID(1920)
+ def test_timeout(self):
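+        # Record the thread count so we can verify that runCmd cleans up its helper threads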
+ numthreads = threading.active_count()
+ start = time.time()
+        # Killing a hanging process only works reliably when not using a shell, hence the list form
+ result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True)
+ self.assertEqual(result.status, -signal.SIGTERM)
+ end = time.time()
+ self.assertLess(end - start, self.TIMEOUT + self.DELTA)
+ self.assertEqual(numthreads, threading.active_count())
+
+ @OETestID(1921)
+ def test_timeout_split(self):
+ numthreads = threading.active_count()
+ start = time.time()
+        # Killing a hanging process only works reliably when not using a shell, hence the list form
+ result = runCmd(['sleep', '60'], timeout=self.TIMEOUT, ignore_status=True, stderr=subprocess.PIPE)
+ self.assertEqual(result.status, -signal.SIGTERM)
+ end = time.time()
+ self.assertLess(end - start, self.TIMEOUT + self.DELTA)
+ self.assertEqual(numthreads, threading.active_count())
+
+ @OETestID(1918)
+ def test_stdin(self):
+ numthreads = threading.active_count()
+ result = runCmd("cat", data=b"hello world", timeout=self.TIMEOUT)
+ self.assertEqual("hello world", result.output)
+ self.assertEqual(numthreads, threading.active_count())
+
+ @OETestID(1919)
+ def test_stdin_timeout(self):
+ numthreads = threading.active_count()
+ start = time.time()
+ result = runCmd(['sleep', '60'], data=b"hello world", timeout=self.TIMEOUT, ignore_status=True)
+ self.assertEqual(result.status, -signal.SIGTERM)
+ end = time.time()
+ self.assertLess(end - start, self.TIMEOUT + self.DELTA)
+ self.assertEqual(numthreads, threading.active_count())
+
+ @OETestID(1908)
+ def test_log(self):
+ log = MemLogger()
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log)
+ self.assertEqual(["Running: echo stdout; echo stderr >&2", "stdout", "stderr"], log.info_msgs)
+ self.assertEqual([], log.error_msgs)
+
+ @OETestID(1909)
+ def test_log_split(self):
+ log = MemLogger()
+ result = runCmd("echo stdout; echo stderr >&2", shell=True, output_log=log, stderr=subprocess.PIPE)
+ self.assertEqual(["Running: echo stdout; echo stderr >&2", "stdout"], log.info_msgs)
+ self.assertEqual(["stderr"], log.error_msgs)
diff --git a/poky/meta/lib/oeqa/selftest/cases/runqemu.py b/poky/meta/lib/oeqa/selftest/cases/runqemu.py
new file mode 100644
index 000000000..5ebdd57a4
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/runqemu.py
@@ -0,0 +1,208 @@
+#
+# Copyright (c) 2017 Wind River Systems, Inc.
+#
+
+import os
+import re
+import tempfile
+import time
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import bitbake, runqemu, get_bb_var, runCmd
+from oeqa.core.decorator.oeid import OETestID
+
+class RunqemuTests(OESelftestTestCase):
+ """Runqemu test class"""
+
+ image_is_ready = False
+ deploy_dir_image = ''
+ # We only want to print runqemu stdout/stderr if there is a test case failure
+ buffer = True
+
+ def setUpLocal(self):
+ super(RunqemuTests, self).setUpLocal()
+ self.recipe = 'core-image-minimal'
+ self.machine = 'qemux86-64'
+ self.fstypes = "ext4 iso hddimg wic.vmdk wic.qcow2 wic.vdi"
+ self.cmd_common = "runqemu nographic"
+
+ self.write_config(
+"""
+MACHINE = "%s"
+IMAGE_FSTYPES = "%s"
+# SYSLINUX_TIMEOUT is in tenths of a second, so 10 means 1 second
+SYSLINUX_TIMEOUT = "10"
+"""
+% (self.machine, self.fstypes)
+ )
+
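+        # Build the image only once and share it across the test methods via class attributes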
+ if not RunqemuTests.image_is_ready:
+ RunqemuTests.deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ bitbake(self.recipe)
+ RunqemuTests.image_is_ready = True
+
+ @OETestID(2001)
+ def test_boot_machine(self):
+ """Test runqemu machine"""
+ cmd = "%s %s" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
+
+ @OETestID(2002)
+ def test_boot_machine_ext4(self):
+ """Test runqemu machine ext4"""
+ cmd = "%s %s ext4" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue('rootfs.ext4' in f.read(), "Failed: %s" % cmd)
+
+ @OETestID(2003)
+ def test_boot_machine_iso(self):
+ """Test runqemu machine iso"""
+ cmd = "%s %s iso" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue('media=cdrom' in f.read(), "Failed: %s" % cmd)
+
+ @OETestID(2004)
+ def test_boot_recipe_image(self):
+ """Test runqemu recipe-image"""
+ cmd = "%s %s" % (self.cmd_common, self.recipe)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
+
+ @OETestID(2005)
+ def test_boot_recipe_image_vmdk(self):
+ """Test runqemu recipe-image vmdk"""
+ cmd = "%s %s wic.vmdk" % (self.cmd_common, self.recipe)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue('format=vmdk' in f.read(), "Failed: %s" % cmd)
+
+ @OETestID(2006)
+ def test_boot_recipe_image_vdi(self):
+ """Test runqemu recipe-image vdi"""
+ cmd = "%s %s wic.vdi" % (self.cmd_common, self.recipe)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue('format=vdi' in f.read(), "Failed: %s" % cmd)
+
+ @OETestID(2007)
+ def test_boot_deploy(self):
+ """Test runqemu deploy_dir_image"""
+ cmd = "%s %s" % (self.cmd_common, self.deploy_dir_image)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
+
+ @OETestID(2008)
+ def test_boot_deploy_hddimg(self):
+ """Test runqemu deploy_dir_image hddimg"""
+ cmd = "%s %s hddimg" % (self.cmd_common, self.deploy_dir_image)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(re.search('file=.*.hddimg', f.read()), "Failed: %s" % cmd)
+
+ @OETestID(2009)
+ def test_boot_machine_slirp(self):
+ """Test runqemu machine slirp"""
+ cmd = "%s slirp %s" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue(' -netdev user' in f.read(), "Failed: %s" % cmd)
+
+ @OETestID(2009)
+ def test_boot_machine_slirp_qcow2(self):
+ """Test runqemu machine slirp qcow2"""
+ cmd = "%s slirp wic.qcow2 %s" % (self.cmd_common, self.machine)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ with open(qemu.qemurunnerlog) as f:
+ self.assertTrue('format=qcow2' in f.read(), "Failed: %s" % cmd)
+
+ @OETestID(2010)
+ def test_boot_qemu_boot(self):
+ """Test runqemu /path/to/image.qemuboot.conf"""
+ qemuboot_conf = "%s-%s.qemuboot.conf" % (self.recipe, self.machine)
+ qemuboot_conf = os.path.join(self.deploy_dir_image, qemuboot_conf)
+ if not os.path.exists(qemuboot_conf):
+ self.skipTest("%s not found" % qemuboot_conf)
+ cmd = "%s %s" % (self.cmd_common, qemuboot_conf)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
+
+ @OETestID(2011)
+ def test_boot_rootfs(self):
+ """Test runqemu /path/to/rootfs.ext4"""
+ rootfs = "%s-%s.ext4" % (self.recipe, self.machine)
+ rootfs = os.path.join(self.deploy_dir_image, rootfs)
+ if not os.path.exists(rootfs):
+ self.skipTest("%s not found" % rootfs)
+ cmd = "%s %s" % (self.cmd_common, rootfs)
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ self.assertTrue(qemu.runner.logged, "Failed: %s" % cmd)
+
+# This test was designed as a separate class to verify that the shutdown
+# command shuts down qemu as expected on each qemu architecture, based on
+# the MACHINE configuration inside the config file
+# (e.g. local.conf).
+#
+# This differs from RunqemuTests, which is dedicated to
+# MACHINE=qemux86-64 and tests that qemux86-64 boots up with various
+# filesystem types, including live images (iso and hddimg), which are
+# not supported on all qemu architectures.
+class QemuTest(OESelftestTestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ super(QemuTest, cls).setUpClass()
+ cls.recipe = 'core-image-minimal'
+ cls.machine = get_bb_var('MACHINE')
+ cls.deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE')
+ cls.cmd_common = "runqemu nographic"
+ cls.qemuboot_conf = "%s-%s.qemuboot.conf" % (cls.recipe, cls.machine)
+ cls.qemuboot_conf = os.path.join(cls.deploy_dir_image, cls.qemuboot_conf)
+ bitbake(cls.recipe)
+
+ def _start_qemu_shutdown_check_if_shutdown_succeeded(self, qemu, timeout):
+ qemu.run_serial("shutdown -h now")
+ # Stop thread will stop the LoggingThread instance used for logging
+ # qemu through serial console, stop thread will prevent this code
+ # from facing exception (Console connection closed unexpectedly)
+ # when qemu was shutdown by the above shutdown command
+ qemu.runner.stop_thread()
+ time_track = 0
+ while True:
+ is_alive = qemu.check()
+ if not is_alive:
+ return True
+ if time_track > timeout:
+ return False
+ time.sleep(1)
+ time_track += 1
+
+ def test_qemu_can_shutdown(self):
+ self.assertExists(self.qemuboot_conf)
+ cmd = "%s %s" % (self.cmd_common, self.qemuboot_conf)
+ shutdown_timeout = 120
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ qemu_shutdown_succeeded = self._start_qemu_shutdown_check_if_shutdown_succeeded(qemu, shutdown_timeout)
+ self.assertTrue(qemu_shutdown_succeeded, 'Failed: %s does not shutdown within timeout(%s)' % (self.machine, shutdown_timeout))
+
+    # Need to have portmap/rpcbind running to allow this test to work, and
+    # the current autobuilder setup does not have this.
+ def disabled_test_qemu_can_boot_nfs_and_shutdown(self):
+ self.assertExists(self.qemuboot_conf)
+ bitbake('meta-ide-support')
+ rootfs_tar = "%s-%s.tar.bz2" % (self.recipe, self.machine)
+ rootfs_tar = os.path.join(self.deploy_dir_image, rootfs_tar)
+ self.assertExists(rootfs_tar)
+ tmpdir = tempfile.mkdtemp(prefix='qemu_nfs')
+ tmpdir_nfs = os.path.join(tmpdir, 'nfs')
+ cmd_extract_nfs = 'runqemu-extract-sdk %s %s' % (rootfs_tar, tmpdir_nfs)
+ result = runCmd(cmd_extract_nfs)
+ self.assertEqual(0, result.status, "runqemu-extract-sdk didn't run as expected. %s" % result.output)
+ cmd = "%s nfs %s %s" % (self.cmd_common, self.qemuboot_conf, tmpdir_nfs)
+ shutdown_timeout = 120
+ with runqemu(self.recipe, ssh=False, launch_cmd=cmd) as qemu:
+ qemu_shutdown_succeeded = self._start_qemu_shutdown_check_if_shutdown_succeeded(qemu, shutdown_timeout)
+ self.assertTrue(qemu_shutdown_succeeded, 'Failed: %s does not shutdown within timeout(%s)' % (self.machine, shutdown_timeout))
+ runCmd('rm -rf %s' % tmpdir)
diff --git a/poky/meta/lib/oeqa/selftest/cases/runtime_test.py b/poky/meta/lib/oeqa/selftest/cases/runtime_test.py
new file mode 100644
index 000000000..9c9b4b341
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/runtime_test.py
@@ -0,0 +1,260 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, runqemu
+from oeqa.utils.sshcontrol import SSHControl
+from oeqa.core.decorator.oeid import OETestID
+import os
+import re
+import tempfile
+import shutil
+
+class TestExport(OESelftestTestCase):
+
+ @classmethod
+ def tearDownClass(cls):
+ runCmd("rm -rf /tmp/sdk")
+ super(TestExport, cls).tearDownClass()
+
+ @OETestID(1499)
+ def test_testexport_basic(self):
+ """
+ Summary: Check basic testexport functionality with only ping test enabled.
+ Expected: 1. testexport directory must be created.
+ 2. runexported.py must run without any error/exception.
+ 3. ping test must succeed.
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+
+ features = 'INHERIT += "testexport"\n'
+        # These aren't the actual IP addresses but the testexport class needs something defined
+ features += 'TEST_SERVER_IP = "192.168.7.1"\n'
+ features += 'TEST_TARGET_IP = "192.168.7.1"\n'
+ features += 'TEST_SUITES = "ping"\n'
+ self.write_config(features)
+
+        # Build testexport for core-image-minimal
+ bitbake('core-image-minimal')
+ bitbake('-c testexport core-image-minimal')
+
+ testexport_dir = get_bb_var('TEST_EXPORT_DIR', 'core-image-minimal')
+
+ # Verify if TEST_EXPORT_DIR was created
+ isdir = os.path.isdir(testexport_dir)
+        self.assertTrue(isdir, 'Failed to create testexport dir: %s' % testexport_dir)
+
+ with runqemu('core-image-minimal') as qemu:
+ # Attempt to run runexported.py to perform ping test
+ test_path = os.path.join(testexport_dir, "oe-test")
+ data_file = os.path.join(testexport_dir, 'data', 'testdata.json')
+ manifest = os.path.join(testexport_dir, 'data', 'manifest')
+ cmd = ("%s runtime --test-data-file %s --packages-manifest %s "
+ "--target-ip %s --server-ip %s --quiet"
+ % (test_path, data_file, manifest, qemu.ip, qemu.server_ip))
+ result = runCmd(cmd)
+            # Verify ping test was successful
+ self.assertEqual(0, result.status, 'oe-test runtime returned a non 0 status')
+
+ @OETestID(1641)
+ def test_testexport_sdk(self):
+ """
+ Summary: Check sdk functionality for testexport.
+ Expected: 1. testexport directory must be created.
+ 2. SDK tarball must exists.
+                  2. SDK tarball must exist.
+ 4. Check if the SDK directory is added to PATH.
+ 5. Run tar from the SDK directory.
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+
+ features = 'INHERIT += "testexport"\n'
+        # These aren't the actual IP addresses but the testexport class needs something defined
+ features += 'TEST_SERVER_IP = "192.168.7.1"\n'
+ features += 'TEST_TARGET_IP = "192.168.7.1"\n'
+ features += 'TEST_SUITES = "ping"\n'
+ features += 'TEST_EXPORT_SDK_ENABLED = "1"\n'
+ features += 'TEST_EXPORT_SDK_PACKAGES = "nativesdk-tar"\n'
+ self.write_config(features)
+
+        # Build testexport for core-image-minimal
+ bitbake('core-image-minimal')
+ bitbake('-c testexport core-image-minimal')
+
+ needed_vars = ['TEST_EXPORT_DIR', 'TEST_EXPORT_SDK_DIR', 'TEST_EXPORT_SDK_NAME']
+ bb_vars = get_bb_vars(needed_vars, 'core-image-minimal')
+ testexport_dir = bb_vars['TEST_EXPORT_DIR']
+ sdk_dir = bb_vars['TEST_EXPORT_SDK_DIR']
+ sdk_name = bb_vars['TEST_EXPORT_SDK_NAME']
+
+ # Check for SDK
+ tarball_name = "%s.sh" % sdk_name
+ tarball_path = os.path.join(testexport_dir, sdk_dir, tarball_name)
+ msg = "Couldn't find SDK tarball: %s" % tarball_path
+        self.assertTrue(os.path.isfile(tarball_path), msg)
+
+ # Extract SDK and run tar from SDK
+ result = runCmd("%s -y -d /tmp/sdk" % tarball_path)
+ self.assertEqual(0, result.status, "Couldn't extract SDK")
+
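+        # The last word of the installer output is the path to the environment-setup script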
+ env_script = result.output.split()[-1]
+ result = runCmd(". %s; which tar" % env_script, shell=True)
+ self.assertEqual(0, result.status, "Couldn't setup SDK environment")
+        is_sdk_tar = "/tmp/sdk" in result.output
+ self.assertTrue(is_sdk_tar, "Couldn't setup SDK environment")
+
+ tar_sdk = result.output
+ result = runCmd("%s --version" % tar_sdk)
+ self.assertEqual(0, result.status, "Couldn't run tar from SDK")
+
+
+class TestImage(OESelftestTestCase):
+
+ @OETestID(1644)
+ def test_testimage_install(self):
+ """
+ Summary: Check install packages functionality for testimage/testexport.
+ Expected: 1. Import tests from a directory other than meta.
+ 2. Check install/uninstall of socat.
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+ if get_bb_var('DISTRO') == 'poky-tiny':
+ self.skipTest('core-image-full-cmdline not buildable for poky-tiny')
+
+ features = 'INHERIT += "testimage"\n'
+ features += 'TEST_SUITES = "ping ssh selftest"\n'
+ self.write_config(features)
+
+        # Build core-image-full-cmdline and socat, then run testimage
+ bitbake('core-image-full-cmdline socat')
+ bitbake('-c testimage core-image-full-cmdline')
+
+ @OETestID(1883)
+ def test_testimage_dnf(self):
+ """
+ Summary: Check package feeds functionality for dnf
+ Expected: 1. Check that remote package feeds can be accessed
+ Product: oe-core
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ """
+ if get_bb_var('DISTRO') == 'poky-tiny':
+ self.skipTest('core-image-full-cmdline not buildable for poky-tiny')
+
+ features = 'INHERIT += "testimage"\n'
+ features += 'TEST_SUITES = "ping ssh dnf_runtime dnf.DnfBasicTest.test_dnf_help"\n'
+ # We don't yet know what the server ip and port will be - they will be patched
+ # in at the start of the on-image test
+ features += 'PACKAGE_FEED_URIS = "http://bogus_ip:bogus_port"\n'
+ features += 'EXTRA_IMAGE_FEATURES += "package-management"\n'
+ features += 'PACKAGE_CLASSES = "package_rpm"\n'
+
+ # Enable package feed signing
+ self.gpg_home = tempfile.mkdtemp(prefix="oeqa-feed-sign-")
+ signing_key_dir = os.path.join(self.testlayer_path, 'files', 'signing')
+ runCmd('gpg --batch --homedir %s --import %s' % (self.gpg_home, os.path.join(signing_key_dir, 'key.secret')))
+ features += 'INHERIT += "sign_package_feed"\n'
+ features += 'PACKAGE_FEED_GPG_NAME = "testuser"\n'
+ features += 'PACKAGE_FEED_GPG_PASSPHRASE_FILE = "%s"\n' % os.path.join(signing_key_dir, 'key.passphrase')
+ features += 'GPG_PATH = "%s"\n' % self.gpg_home
+ self.write_config(features)
+
+        # Build core-image-full-cmdline and socat, then run testimage
+ bitbake('core-image-full-cmdline socat')
+ bitbake('-c testimage core-image-full-cmdline')
+
+        # Remove the temporary oeqa-feed-sign directory
+ shutil.rmtree(self.gpg_home, ignore_errors=True)
+
+class Postinst(OESelftestTestCase):
+ @OETestID(1540)
+ @OETestID(1545)
+ def test_postinst_rootfs_and_boot(self):
+ """
+ Summary: The purpose of this test case is to verify Post-installation
+ scripts are called when rootfs is created and also test
+                     that scripts can be delayed to run at first boot.
+ Dependencies: NA
+ Steps: 1. Add proper configuration to local.conf file
+ 2. Build a "core-image-minimal" image
+ 3. Verify that file created by postinst_rootfs recipe is
+ present on rootfs dir.
+ 4. Boot the image created on qemu and verify that the file
+ created by postinst_boot recipe is present on image.
+ Expected: The files are successfully created during rootfs and boot
+                    time for 3 different package managers: rpm, ipk and deb, and
+                    for the init managers sysvinit and systemd.
+
+ """
+
+ import oe.path
+
+ vars = get_bb_vars(("IMAGE_ROOTFS", "sysconfdir"), "core-image-minimal")
+ rootfs = vars["IMAGE_ROOTFS"]
+ self.assertIsNotNone(rootfs)
+ sysconfdir = vars["sysconfdir"]
+ self.assertIsNotNone(sysconfdir)
+ # Need to use oe.path here as sysconfdir starts with /
+ hosttestdir = oe.path.join(rootfs, sysconfdir, "postinst-test")
+ targettestdir = os.path.join(sysconfdir, "postinst-test")
+
+ for init_manager in ("sysvinit", "systemd"):
+ for classes in ("package_rpm", "package_deb", "package_ipk"):
+ with self.subTest(init_manager=init_manager, package_class=classes):
+ features = 'CORE_IMAGE_EXTRA_INSTALL = "postinst-delayed-b"\n'
+ features += 'IMAGE_FEATURES += "package-management empty-root-password"\n'
+ features += 'PACKAGE_CLASSES = "%s"\n' % classes
+ if init_manager == "systemd":
+ features += 'DISTRO_FEATURES_append = " systemd"\n'
+ features += 'VIRTUAL-RUNTIME_init_manager = "systemd"\n'
+ features += 'DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit"\n'
+ features += 'VIRTUAL-RUNTIME_initscripts = ""\n'
+ self.write_config(features)
+
+ bitbake('core-image-minimal')
+
+ self.assertTrue(os.path.isfile(os.path.join(hosttestdir, "rootfs")),
+ "rootfs state file was not created")
+
+ with runqemu('core-image-minimal') as qemu:
+ # Make the test echo a string and search for that as
+                    # run_serial()'s status code is useless.
+ for filename in ("rootfs", "delayed-a", "delayed-b"):
+ status, output = qemu.run_serial("test -f %s && echo found" % os.path.join(targettestdir, filename))
+ self.assertEqual(output, "found", "%s was not present on boot" % filename)
+
+ def test_failing_postinst(self):
+ """
+ Summary: The purpose of this test case is to verify that post-installation
+ scripts that contain errors are properly reported.
+ Expected: The scriptlet failure is properly reported.
+ The file that is created after the error in the scriptlet is not present.
+ Product: oe-core
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ """
+
+ import oe.path
+
+ vars = get_bb_vars(("IMAGE_ROOTFS", "sysconfdir"), "core-image-minimal")
+ rootfs = vars["IMAGE_ROOTFS"]
+ self.assertIsNotNone(rootfs)
+ sysconfdir = vars["sysconfdir"]
+ self.assertIsNotNone(sysconfdir)
+ # Need to use oe.path here as sysconfdir starts with /
+ hosttestdir = oe.path.join(rootfs, sysconfdir, "postinst-test")
+
+ for classes in ("package_rpm", "package_deb", "package_ipk"):
+ with self.subTest(package_class=classes):
+ features = 'CORE_IMAGE_EXTRA_INSTALL = "postinst-rootfs-failing"\n'
+ features += 'PACKAGE_CLASSES = "%s"\n' % classes
+ self.write_config(features)
+ bb_result = bitbake('core-image-minimal')
+                self.assertIn("Intentionally failing postinstall scriptlets of ['postinst-rootfs-failing'] to defer them to first boot is deprecated.",
+                              bb_result.output,
+                              "Warning about a failed scriptlet not found in bitbake output: %s" % bb_result.output)
+
+ self.assertTrue(os.path.isfile(os.path.join(hosttestdir, "rootfs-before-failure")),
+ "rootfs-before-failure file was not created")
+ self.assertFalse(os.path.isfile(os.path.join(hosttestdir, "rootfs-after-failure")),
+ "rootfs-after-failure file was created")
+
diff --git a/poky/meta/lib/oeqa/selftest/cases/selftest.py b/poky/meta/lib/oeqa/selftest/cases/selftest.py
new file mode 100644
index 000000000..4b3cb1446
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/selftest.py
@@ -0,0 +1,52 @@
+import importlib
+import os
+from oeqa.utils.commands import runCmd
+import oeqa.selftest
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.core.decorator.oeid import OETestID
+
+class ExternalLayer(OESelftestTestCase):
+
+ @OETestID(1885)
+ def test_list_imported(self):
+ """
+ Summary: Checks functionality to import tests from other layers.
+ Expected: 1. File "external-layer.py" must be in
+ oeqa.selftest.__path__
+                     2. test_unconditional_pass method must exist
+                        in the ImportedTests class
+ Product: oe-core
+ Author: Mariano Lopez <mariano.lopez@intel.com>
+ """
+
+ test_file = "external-layer.py"
+ test_module = "oeqa.selftest.cases.external-layer"
+ method_name = "test_unconditional_pass"
+
+ # Check if "external-layer.py" is in oeqa path
+ found_file = search_test_file(test_file)
+ self.assertTrue(found_file, msg="Can't find %s in the oeqa path" % test_file)
+
+ # Import oeqa.selftest.external-layer module and search for
+ # test_unconditional_pass method of ImportedTests class
+ found_method = search_method(test_module, method_name)
+        self.assertTrue(found_method, msg="Can't find %s method" % method_name)
+
+def search_test_file(file_name):
+ for layer_path in oeqa.selftest.__path__:
+ for _, _, files in os.walk(layer_path):
+ for f in files:
+ if f == file_name:
+ return True
+ return False
+
+def search_method(module, method):
+ modlib = importlib.import_module(module)
+ for var in vars(modlib):
+ klass = vars(modlib)[var]
+ if isinstance(klass, type(OESelftestTestCase)) and issubclass(klass, OESelftestTestCase):
+ for m in dir(klass):
+ if m == method:
+ return True
+ return False
+
diff --git a/poky/meta/lib/oeqa/selftest/cases/signing.py b/poky/meta/lib/oeqa/selftest/cases/signing.py
new file mode 100644
index 000000000..a750cfc7b
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/signing.py
@@ -0,0 +1,189 @@
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
+import os
+import glob
+import re
+import shutil
+import tempfile
+from oeqa.core.decorator.oeid import OETestID
+from oeqa.utils.ftools import write_file
+
+
+class Signing(OESelftestTestCase):
+
+ gpg_dir = ""
+ pub_key_path = ""
+ secret_key_path = ""
+
+ @classmethod
+ def setUpClass(cls):
+ super(Signing, cls).setUpClass()
+ # Check that we can find the gpg binary and fail early if we can't
+ if not shutil.which("gpg"):
+ raise AssertionError("This test needs GnuPG")
+
+ cls.gpg_dir = tempfile.mkdtemp(prefix="oeqa-signing-")
+
+ cls.pub_key_path = os.path.join(cls.testlayer_path, 'files', 'signing', "key.pub")
+ cls.secret_key_path = os.path.join(cls.testlayer_path, 'files', 'signing', "key.secret")
+
+ runCmd('gpg --batch --homedir %s --import %s %s' % (cls.gpg_dir, cls.pub_key_path, cls.secret_key_path))
+
+ @classmethod
+ def tearDownClass(cls):
+ shutil.rmtree(cls.gpg_dir, ignore_errors=True)
+
+ @OETestID(1362)
+ def test_signing_packages(self):
+ """
+ Summary: Test that packages can be signed in the package feed
+ Expected: Package should be signed with the correct key
+ Expected: Images can be created from signed packages
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ Author: Alexander Kanavin <alexander.kanavin@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+ import oe.packagedata
+
+ package_classes = get_bb_var('PACKAGE_CLASSES')
+ if 'package_rpm' not in package_classes:
+ self.skipTest('This test requires RPM Packaging.')
+
+ test_recipe = 'ed'
+
+ feature = 'INHERIT += "sign_rpm"\n'
+ feature += 'RPM_GPG_PASSPHRASE = "test123"\n'
+ feature += 'RPM_GPG_NAME = "testuser"\n'
+ feature += 'GPG_PATH = "%s"\n' % self.gpg_dir
+
+ self.write_config(feature)
+
+ bitbake('-c clean %s' % test_recipe)
+ bitbake('-f -c package_write_rpm %s' % test_recipe)
+
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+
+ needed_vars = ['PKGDATA_DIR', 'DEPLOY_DIR_RPM', 'PACKAGE_ARCH', 'STAGING_BINDIR_NATIVE']
+ bb_vars = get_bb_vars(needed_vars, test_recipe)
+ pkgdatadir = bb_vars['PKGDATA_DIR']
+ pkgdata = oe.packagedata.read_pkgdatafile(pkgdatadir + "/runtime/ed")
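+        # PKGE (the package epoch) is optional, so include it in the package file name only when set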
+ if 'PKGE' in pkgdata:
+ pf = pkgdata['PN'] + "-" + pkgdata['PKGE'] + pkgdata['PKGV'] + '-' + pkgdata['PKGR']
+ else:
+ pf = pkgdata['PN'] + "-" + pkgdata['PKGV'] + '-' + pkgdata['PKGR']
+ deploy_dir_rpm = bb_vars['DEPLOY_DIR_RPM']
+ package_arch = bb_vars['PACKAGE_ARCH'].replace('-', '_')
+ staging_bindir_native = bb_vars['STAGING_BINDIR_NATIVE']
+
+ pkg_deploy = os.path.join(deploy_dir_rpm, package_arch, '.'.join((pf, package_arch, 'rpm')))
+
+ # Use a temporary rpmdb
+ rpmdb = tempfile.mkdtemp(prefix='oeqa-rpmdb')
+
+ runCmd('%s/rpmkeys --define "_dbpath %s" --import %s' %
+ (staging_bindir_native, rpmdb, self.pub_key_path))
+
+ ret = runCmd('%s/rpmkeys --define "_dbpath %s" --checksig %s' %
+ (staging_bindir_native, rpmdb, pkg_deploy))
+ # tmp/deploy/rpm/i586/ed-1.9-r0.i586.rpm: rsa sha1 md5 OK
+ self.assertIn('digests signatures OK', ret.output, 'Package signed incorrectly.')
+ shutil.rmtree(rpmdb)
+
+ #Check that an image can be built from signed packages
+ self.add_command_to_tearDown('bitbake -c clean core-image-minimal')
+ bitbake('-c clean core-image-minimal')
+ bitbake('core-image-minimal')
+
+
+ @OETestID(1382)
+ def test_signing_sstate_archive(self):
+ """
+ Summary: Test that sstate archives can be signed
+ Expected: Package should be signed with the correct key
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ test_recipe = 'ed'
+
+ builddir = os.environ.get('BUILDDIR')
+ sstatedir = os.path.join(builddir, 'test-sstate')
+
+ self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe)
+ self.add_command_to_tearDown('rm -rf %s' % sstatedir)
+
+ feature = 'SSTATE_SIG_KEY ?= "testuser"\n'
+ feature += 'SSTATE_SIG_PASSPHRASE ?= "test123"\n'
+ feature += 'SSTATE_VERIFY_SIG ?= "1"\n'
+ feature += 'GPG_PATH = "%s"\n' % self.gpg_dir
+ feature += 'SSTATE_DIR = "%s"\n' % sstatedir
+ # Any mirror might have partial sstate without .sig files, triggering failures
+ feature += 'SSTATE_MIRRORS_forcevariable = ""\n'
+
+ self.write_config(feature)
+
+ bitbake('-c clean %s' % test_recipe)
+ bitbake(test_recipe)
+
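+        # sstate archive names embed the recipe name between colons, hence the ':ed:' pattern below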
+ recipe_sig = glob.glob(sstatedir + '/*/*:ed:*_package.tgz.sig')
+ recipe_tgz = glob.glob(sstatedir + '/*/*:ed:*_package.tgz')
+
+ self.assertEqual(len(recipe_sig), 1, 'Failed to find .sig file.')
+ self.assertEqual(len(recipe_tgz), 1, 'Failed to find .tgz file.')
+
+ ret = runCmd('gpg --homedir %s --verify %s %s' % (self.gpg_dir, recipe_sig[0], recipe_tgz[0]))
+ # gpg: Signature made Thu 22 Oct 2015 01:45:09 PM EEST using RSA key ID 61EEFB30
+ # gpg: Good signature from "testuser (nocomment) <testuser@email.com>"
+ self.assertIn('gpg: Good signature from', ret.output, 'Package signed incorrectly.')
+
+
+class LockedSignatures(OESelftestTestCase):
+
+ @OETestID(1420)
+ def test_locked_signatures(self):
+ """
+ Summary: Test locked signature mechanism
+        Expected:    Locked signatures will prevent the task from running
+ Product: oe-core
+ Author: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ AutomatedBy: Daniel Istrate <daniel.alexandrux.istrate@intel.com>
+ """
+
+ test_recipe = 'ed'
+ locked_sigs_file = 'locked-sigs.inc'
+
+ self.add_command_to_tearDown('rm -f %s' % os.path.join(self.builddir, locked_sigs_file))
+
+ bitbake(test_recipe)
+ # Generate locked sigs include file
+ bitbake('-S none %s' % test_recipe)
+
+ feature = 'require %s\n' % locked_sigs_file
+ feature += 'SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n'
+ self.write_config(feature)
+
+ # Build a locked recipe
+ bitbake(test_recipe)
+
+ # Make a change that should cause the locked task signature to change
+ recipe_append_file = test_recipe + '_' + get_bb_var('PV', test_recipe) + '.bbappend'
+ recipe_append_path = os.path.join(self.testlayer_path, 'recipes-test', test_recipe, recipe_append_file)
+ feature = 'SUMMARY += "test locked signature"\n'
+
+ os.mkdir(os.path.join(self.testlayer_path, 'recipes-test', test_recipe))
+ write_file(recipe_append_path, feature)
+
+ self.add_command_to_tearDown('rm -rf %s' % os.path.join(self.testlayer_path, 'recipes-test', test_recipe))
+
+ # Build the recipe again
+ ret = bitbake(test_recipe)
+
+ # Verify you get the warning and that the real task *isn't* run (i.e. the locked signature has worked)
+ patt = r'WARNING: The %s:do_package sig is computed to be \S+, but the sig is locked to \S+ in SIGGEN_LOCKEDSIGS\S+' % test_recipe
+ found_warn = re.search(patt, ret.output)
+
+ self.assertIsNotNone(found_warn, "Didn't find the expected warning message. Output: %s" % ret.output)
diff --git a/poky/meta/lib/oeqa/selftest/cases/sstate.py b/poky/meta/lib/oeqa/selftest/cases/sstate.py
new file mode 100644
index 000000000..bc2fdbd8c
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/sstate.py
@@ -0,0 +1,64 @@
+import datetime
+import unittest
+import os
+import re
+import shutil
+
+import oeqa.utils.ftools as ftools
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_vars, get_test_layer
+
+
+class SStateBase(OESelftestTestCase):
+
+ def setUpLocal(self):
+ super(SStateBase, self).setUpLocal()
+ self.temp_sstate_location = None
+ needed_vars = ['SSTATE_DIR', 'NATIVELSBSTRING', 'TCLIBC', 'TUNE_ARCH',
+ 'TOPDIR', 'TARGET_VENDOR', 'TARGET_OS']
+ bb_vars = get_bb_vars(needed_vars)
+ self.sstate_path = bb_vars['SSTATE_DIR']
+ self.hostdistro = bb_vars['NATIVELSBSTRING']
+ self.tclibc = bb_vars['TCLIBC']
+ self.tune_arch = bb_vars['TUNE_ARCH']
+ self.topdir = bb_vars['TOPDIR']
+ self.target_vendor = bb_vars['TARGET_VENDOR']
+ self.target_os = bb_vars['TARGET_OS']
+ self.distro_specific_sstate = os.path.join(self.sstate_path, self.hostdistro)
+
+ # Creates a special sstate configuration with the option to add sstate mirrors
+ def config_sstate(self, temp_sstate_location=False, add_local_mirrors=[]):
+ self.temp_sstate_location = temp_sstate_location
+
+ if self.temp_sstate_location:
+ temp_sstate_path = os.path.join(self.builddir, "temp_sstate_%s" % datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
+ config_temp_sstate = "SSTATE_DIR = \"%s\"" % temp_sstate_path
+ self.append_config(config_temp_sstate)
+ self.track_for_cleanup(temp_sstate_path)
+ bb_vars = get_bb_vars(['SSTATE_DIR', 'NATIVELSBSTRING'])
+ self.sstate_path = bb_vars['SSTATE_DIR']
+ self.hostdistro = bb_vars['NATIVELSBSTRING']
+ self.distro_specific_sstate = os.path.join(self.sstate_path, self.hostdistro)
+
+ if add_local_mirrors:
+ config_set_sstate_if_not_set = 'SSTATE_MIRRORS ?= ""'
+ self.append_config(config_set_sstate_if_not_set)
+ for local_mirror in add_local_mirrors:
+                self.assertNotEqual(os.path.join(local_mirror), os.path.join(self.sstate_path), msg='Cannot add the current sstate path as a sstate mirror')
+ config_sstate_mirror = "SSTATE_MIRRORS += \"file://.* file:///%s/PATH\"" % local_mirror
+ self.append_config(config_sstate_mirror)
+
+ # Returns a list containing sstate files
+ def search_sstate(self, filename_regex, distro_specific=True, distro_nonspecific=True):
+ result = []
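+        # sstate files live in two-character hash prefix directories, either directly under
+        # SSTATE_DIR (distro-nonspecific) or under a NATIVELSBSTRING subdirectory (distro-specific)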
+ for root, dirs, files in os.walk(self.sstate_path):
+ if distro_specific and re.search("%s/[a-z0-9]{2}$" % self.hostdistro, root):
+ for f in files:
+ if re.search(filename_regex, f):
+ result.append(f)
+ if distro_nonspecific and re.search("%s/[a-z0-9]{2}$" % self.sstate_path, root):
+ for f in files:
+ if re.search(filename_regex, f):
+ result.append(f)
+ return result
diff --git a/poky/meta/lib/oeqa/selftest/cases/sstatetests.py b/poky/meta/lib/oeqa/selftest/cases/sstatetests.py
new file mode 100644
index 000000000..7b008e409
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/sstatetests.py
@@ -0,0 +1,536 @@
+import os
+import shutil
+import glob
+import subprocess
+import tempfile
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer, create_temp_layer
+from oeqa.selftest.cases.sstate import SStateBase
+from oeqa.core.decorator.oeid import OETestID
+
+import bb.siggen
+
+class SStateTests(SStateBase):
+ def test_autorev_sstate_works(self):
+ # Test that a git repository which changes is correctly handled by SRCREV = ${AUTOREV}
+ # when PV does not contain SRCPV
+
+ tempdir = tempfile.mkdtemp(prefix='oeqa')
+ self.track_for_cleanup(tempdir)
+ create_temp_layer(tempdir, 'selftestrecipetool')
+ self.add_command_to_tearDown('bitbake-layers remove-layer %s' % tempdir)
+ runCmd('bitbake-layers add-layer %s' % tempdir)
+
+ # Use dbus-wait as a local git repo we can add a commit between two builds in
+ pn = 'dbus-wait'
+ srcrev = '6cc6077a36fe2648a5f993fe7c16c9632f946517'
+ url = 'git://git.yoctoproject.org/dbus-wait'
+ result = runCmd('git clone %s noname' % url, cwd=tempdir)
+ srcdir = os.path.join(tempdir, 'noname')
+ result = runCmd('git reset --hard %s' % srcrev, cwd=srcdir)
+ self.assertTrue(os.path.isfile(os.path.join(srcdir, 'configure.ac')), 'Unable to find configure script in source directory')
+
+ recipefile = os.path.join(tempdir, "recipes-test", "dbus-wait-test", 'dbus-wait-test_git.bb')
+ os.makedirs(os.path.dirname(recipefile))
+ srcuri = 'git://' + srcdir + ';protocol=file'
+ result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri])
+ self.assertTrue(os.path.isfile(recipefile), 'recipetool did not create recipe file; output:\n%s' % result.output)
+
+ with open(recipefile, 'a') as f:
+ f.write('SRCREV = "${AUTOREV}"\n')
+ f.write('PV = "1.0"\n')
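+        # PV deliberately omits SRCPV; combined with SRCREV = "${AUTOREV}" this is the case under test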
+
+ bitbake("dbus-wait-test -c fetch")
+ with open(os.path.join(srcdir, "bar.txt"), "w") as f:
+ f.write("foo")
+ result = runCmd('git add bar.txt; git commit -asm "add bar"', cwd=srcdir)
+ bitbake("dbus-wait-test -c unpack")
+
+
+ # Test sstate files creation and their location
+ def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True):
+ self.config_sstate(temp_sstate_location, [self.sstate_path])
+
+ if self.temp_sstate_location:
+ bitbake(['-cclean'] + targets)
+ else:
+ bitbake(['-ccleansstate'] + targets)
+
+ bitbake(targets)
+ file_tracker = []
+ results = self.search_sstate('|'.join(map(str, targets)), distro_specific, distro_nonspecific)
+ if distro_nonspecific:
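+            # populate_lic, fetch, unpack and patch sstate is always distro-nonspecific, so skip it here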
+ for r in results:
+ if r.endswith(("_populate_lic.tgz", "_populate_lic.tgz.siginfo", "_fetch.tgz.siginfo", "_unpack.tgz.siginfo", "_patch.tgz.siginfo")):
+ continue
+ file_tracker.append(r)
+ else:
+ file_tracker = results
+
+        if should_pass:
+            self.assertTrue(file_tracker, msg="Could not find sstate files for: %s" % ', '.join(map(str, targets)))
+        else:
+            self.assertFalse(file_tracker, msg="Found sstate files in the wrong place for: %s (found %s)" % (', '.join(map(str, targets)), str(file_tracker)))
+
+ @OETestID(975)
+ def test_sstate_creation_distro_specific_pass(self):
+ self.run_test_sstate_creation(['binutils-cross-'+ self.tune_arch, 'binutils-native'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
+
+ @OETestID(1374)
+ def test_sstate_creation_distro_specific_fail(self):
+ self.run_test_sstate_creation(['binutils-cross-'+ self.tune_arch, 'binutils-native'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True, should_pass=False)
+
+ @OETestID(976)
+ def test_sstate_creation_distro_nonspecific_pass(self):
+ self.run_test_sstate_creation(['linux-libc-headers'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
+
+ @OETestID(1375)
+ def test_sstate_creation_distro_nonspecific_fail(self):
+ self.run_test_sstate_creation(['linux-libc-headers'], distro_specific=True, distro_nonspecific=False, temp_sstate_location=True, should_pass=False)
+
+ # Test the sstate files deletion part of the do_cleansstate task
+ def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True):
+ self.config_sstate(temp_sstate_location, [self.sstate_path])
+
+ bitbake(['-ccleansstate'] + targets)
+
+ bitbake(targets)
+ tgz_created = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
+ self.assertTrue(tgz_created, msg="Could not find sstate .tgz files for: %s (%s)" % (', '.join(map(str, targets)), str(tgz_created)))
+
+ siginfo_created = self.search_sstate('|'.join(map(str, [s + '.*?\.siginfo$' for s in targets])), distro_specific, distro_nonspecific)
+ self.assertTrue(siginfo_created, msg="Could not find sstate .siginfo files for: %s (%s)" % (', '.join(map(str, targets)), str(siginfo_created)))
+
+ bitbake(['-ccleansstate'] + targets)
+ tgz_removed = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific, distro_nonspecific)
+ self.assertTrue(not tgz_removed, msg="do_cleansstate didn't remove .tgz sstate files for: %s (%s)" % (', '.join(map(str, targets)), str(tgz_removed)))
+
+ @OETestID(977)
+ def test_cleansstate_task_distro_specific_nonspecific(self):
+ targets = ['binutils-cross-'+ self.tune_arch, 'binutils-native']
+ targets.append('linux-libc-headers')
+ self.run_test_cleansstate_task(targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True)
+
+ @OETestID(1376)
+ def test_cleansstate_task_distro_nonspecific(self):
+ self.run_test_cleansstate_task(['linux-libc-headers'], distro_specific=False, distro_nonspecific=True, temp_sstate_location=True)
+
+ @OETestID(1377)
+ def test_cleansstate_task_distro_specific(self):
+ targets = ['binutils-cross-'+ self.tune_arch, 'binutils-native']
+ targets.append('linux-libc-headers')
+ self.run_test_cleansstate_task(targets, distro_specific=True, distro_nonspecific=False, temp_sstate_location=True)
+
+
+ # Test rebuilding of distro-specific sstate files
+ def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True):
+ self.config_sstate(temp_sstate_location, [self.sstate_path])
+
+ bitbake(['-ccleansstate'] + targets)
+
+ bitbake(targets)
+ results = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=False, distro_nonspecific=True)
+ filtered_results = []
+ for r in results:
+ if r.endswith(("_populate_lic.tgz", "_populate_lic.tgz.siginfo")):
+ continue
+ filtered_results.append(r)
+ self.assertTrue(filtered_results == [], msg="Found distro non-specific sstate for: %s (%s)" % (', '.join(map(str, targets)), str(filtered_results)))
+ file_tracker_1 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
+        self.assertTrue(len(file_tracker_1) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
+
+ self.track_for_cleanup(self.distro_specific_sstate + "_old")
+ shutil.copytree(self.distro_specific_sstate, self.distro_specific_sstate + "_old")
+ shutil.rmtree(self.distro_specific_sstate)
+
+ bitbake(['-cclean'] + targets)
+ bitbake(targets)
+ file_tracker_2 = self.search_sstate('|'.join(map(str, [s + '.*?\.tgz$' for s in targets])), distro_specific=True, distro_nonspecific=False)
+        self.assertTrue(len(file_tracker_2) >= len(targets), msg = "Not all sstate files were created for: %s" % ', '.join(map(str, targets)))
+
+ not_recreated = [x for x in file_tracker_1 if x not in file_tracker_2]
+        self.assertTrue(not_recreated == [], msg="The following sstate files were not recreated: %s" % ', '.join(map(str, not_recreated)))
+
+ created_once = [x for x in file_tracker_2 if x not in file_tracker_1]
+        self.assertTrue(created_once == [], msg="The following sstate files were created only in the second run: %s" % ', '.join(map(str, created_once)))
+
+ @OETestID(175)
+ def test_rebuild_distro_specific_sstate_cross_native_targets(self):
+ self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + self.tune_arch, 'binutils-native'], temp_sstate_location=True)
+
+ @OETestID(1372)
+ def test_rebuild_distro_specific_sstate_cross_target(self):
+ self.run_test_rebuild_distro_specific_sstate(['binutils-cross-' + self.tune_arch], temp_sstate_location=True)
+
+ @OETestID(1373)
+ def test_rebuild_distro_specific_sstate_native_target(self):
+ self.run_test_rebuild_distro_specific_sstate(['binutils-native'], temp_sstate_location=True)
+
+
+    # Test the sstate-cache-management script. Each element in the global_config list is used with the corresponding element in the target_config list.
+    # global_config elements are expected not to generate any sstate files that would be removed by sstate-cache-management.sh (such as changing the value of MACHINE).
+ def run_test_sstate_cache_management_script(self, target, global_config=[''], target_config=[''], ignore_patterns=[]):
+ self.assertTrue(global_config)
+ self.assertTrue(target_config)
+        self.assertEqual(len(global_config), len(target_config), msg='Lists global_config and target_config should have the same number of elements')
+ self.config_sstate(temp_sstate_location=True, add_local_mirrors=[self.sstate_path])
+
+ # If buildhistory is enabled, we need to disable version-going-backwards
+ # QA checks for this test. It may report errors otherwise.
+ self.append_config('ERROR_QA_remove = "version-going-backwards"')
+
+        # For now this only checks if random sstate tasks are handled correctly as a group.
+ # In the future we should add control over what tasks we check for.
+
+ sstate_archs_list = []
+ expected_remaining_sstate = []
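+        # Snapshot sstate around builds matching the final configuration: --remove-duplicated
+        # keeps the newest duplicates, so only that sstate is expected to remain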
+ for idx in range(len(target_config)):
+ self.append_config(global_config[idx])
+ self.append_recipeinc(target, target_config[idx])
+ sstate_arch = get_bb_var('SSTATE_PKGARCH', target)
+ if not sstate_arch in sstate_archs_list:
+ sstate_archs_list.append(sstate_arch)
+ if target_config[idx] == target_config[-1]:
+ target_sstate_before_build = self.search_sstate(target + '.*?\.tgz$')
+ bitbake("-cclean %s" % target)
+ result = bitbake(target, ignore_status=True)
+ if target_config[idx] == target_config[-1]:
+ target_sstate_after_build = self.search_sstate(target + '.*?\.tgz$')
+ expected_remaining_sstate += [x for x in target_sstate_after_build if x not in target_sstate_before_build if not any(pattern in x for pattern in ignore_patterns)]
+ self.remove_config(global_config[idx])
+ self.remove_recipeinc(target, target_config[idx])
+ self.assertEqual(result.status, 0, msg = "build of %s failed with %s" % (target, result.output))
+
+ runCmd("sstate-cache-management.sh -y --cache-dir=%s --remove-duplicated --extra-archs=%s" % (self.sstate_path, ','.join(map(str, sstate_archs_list))))
+ actual_remaining_sstate = [x for x in self.search_sstate(target + '.*?\.tgz$') if not any(pattern in x for pattern in ignore_patterns)]
+
+ actual_not_expected = [x for x in actual_remaining_sstate if x not in expected_remaining_sstate]
+        self.assertFalse(actual_not_expected, msg="Files should have been removed but were not: %s" % ', '.join(map(str, actual_not_expected)))
+ expected_not_actual = [x for x in expected_remaining_sstate if x not in actual_remaining_sstate]
+        self.assertFalse(expected_not_actual, msg="Extra files were removed: %s" % ', '.join(map(str, expected_not_actual)))
+
+ @OETestID(973)
+ def test_sstate_cache_management_script_using_pr_1(self):
+ global_config = []
+ target_config = []
+ global_config.append('')
+ target_config.append('PR = "0"')
+ self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
+
+ @OETestID(978)
+ def test_sstate_cache_management_script_using_pr_2(self):
+ global_config = []
+ target_config = []
+ global_config.append('')
+ target_config.append('PR = "0"')
+ global_config.append('')
+ target_config.append('PR = "1"')
+ self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
+
+ @OETestID(979)
+ def test_sstate_cache_management_script_using_pr_3(self):
+ global_config = []
+ target_config = []
+ global_config.append('MACHINE = "qemux86-64"')
+ target_config.append('PR = "0"')
+ global_config.append(global_config[0])
+ target_config.append('PR = "1"')
+ global_config.append('MACHINE = "qemux86"')
+ target_config.append('PR = "1"')
+ self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
+
+ @OETestID(974)
+ def test_sstate_cache_management_script_using_machine(self):
+ global_config = []
+ target_config = []
+ global_config.append('MACHINE = "qemux86-64"')
+ target_config.append('')
+ global_config.append('MACHINE = "qemux86"')
+ target_config.append('')
+ self.run_test_sstate_cache_management_script('m4', global_config, target_config, ignore_patterns=['populate_lic'])
+
+ @OETestID(1270)
+ def test_sstate_32_64_same_hash(self):
+ """
+ The sstate checksums for both native and target should not vary whether
+ they're built on a 32 or 64 bit system. Rather than requiring two different
+        build machines and running builds, override the variables calling uname()
+ manually and check using bitbake -S.
+ """
+
+ self.write_config("""
+MACHINE = "qemux86"
+TMPDIR = "${TOPDIR}/tmp-sstatesamehash"
+BUILD_ARCH = "x86_64"
+BUILD_OS = "linux"
+SDKMACHINE = "x86_64"
+PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ bitbake("core-image-sato -S none")
+ self.write_config("""
+MACHINE = "qemux86"
+TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
+BUILD_ARCH = "i686"
+BUILD_OS = "linux"
+SDKMACHINE = "i686"
+PACKAGE_CLASSES = "package_rpm package_ipk package_deb"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ bitbake("core-image-sato -S none")
+
+ def get_files(d):
+ f = []
+ for root, dirs, files in os.walk(d):
+ if "core-image-sato" in root:
+ # SDKMACHINE changing will change
+ # do_rootfs/do_testimage/do_build stamps of images which
+ # is safe to ignore.
+ continue
+ f.extend(os.path.join(root, name) for name in files)
+ return f
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/")
+ files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash").replace("i686-linux", "x86_64-linux").replace("i686" + self.target_vendor + "-linux", "x86_64" + self.target_vendor + "-linux", ) for x in files2]
+ self.maxDiff = None
+ self.assertCountEqual(files1, files2)
+
+
+ @OETestID(1271)
+ def test_sstate_nativelsbstring_same_hash(self):
+ """
+ The sstate checksums should be independent of whichever NATIVELSBSTRING is
+ detected. Rather than requiring two different build machines and running
+ builds, override the variables manually and check using bitbake -S.
+ """
+
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+NATIVELSBSTRING = \"DistroA\"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ bitbake("core-image-sato -S none")
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+NATIVELSBSTRING = \"DistroB\"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ bitbake("core-image-sato -S none")
+
+ def get_files(d):
+ f = []
+ for root, dirs, files in os.walk(d):
+ f.extend(os.path.join(root, name) for name in files)
+ return f
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/")
+ files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
+ self.maxDiff = None
+ self.assertCountEqual(files1, files2)
+
+ @OETestID(1368)
+ def test_sstate_allarch_samesigs(self):
+ """
+ The sstate checksums of allarch packages should be independent of whichever
+ MACHINE is set. Check this using bitbake -S.
+ Also, rather than duplicate the test, check nativesdk stamps are the same between
+ the two MACHINE values.
+ """
+
+ configA = """
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+MACHINE = \"qemux86-64\"
+"""
+ configB = """
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+MACHINE = \"qemuarm\"
+"""
+ self.sstate_allarch_samesigs(configA, configB)
+
+ @OETestID(1645)
+ def test_sstate_allarch_samesigs_multilib(self):
+ """
+ The sstate checksums of allarch multilib packages should be independent of whichever
+ MACHINE is set. Check this using bitbake -S.
+ Also, rather than duplicate the test, check nativesdk stamps are the same between
+ the two MACHINE values.
+ """
+
+ configA = """
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+MACHINE = \"qemux86-64\"
+require conf/multilib.conf
+MULTILIBS = \"multilib:lib32\"
+DEFAULTTUNE_virtclass-multilib-lib32 = \"x86\"
+"""
+ configB = """
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+MACHINE = \"qemuarm\"
+require conf/multilib.conf
+MULTILIBS = \"\"
+"""
+ self.sstate_allarch_samesigs(configA, configB)
+
+ def sstate_allarch_samesigs(self, configA, configB):
+
+ self.write_config(configA)
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ bitbake("world meta-toolchain -S none")
+ self.write_config(configB)
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ bitbake("world meta-toolchain -S none")
+
+ def get_files(d):
+ f = {}
+ for root, dirs, files in os.walk(d):
+ for name in files:
+ if "meta-environment" in root or "cross-canadian" in root:
+ continue
+ if "do_build" not in name:
+ # 1.4.1+gitAUTOINC+302fca9f4c-r0.do_package_write_ipk.sigdata.f3a2a38697da743f0dbed8b56aafcf79
+ (_, task, _, shash) = name.rsplit(".", 3)
+ f[os.path.join(os.path.basename(root), task)] = shash
+ return f
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/all" + self.target_vendor + "-" + self.target_os)
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/all" + self.target_vendor + "-" + self.target_os)
+ self.maxDiff = None
+ self.assertEqual(files1, files2)
+
+ nativesdkdir = os.path.basename(glob.glob(self.topdir + "/tmp-sstatesamehash/stamps/*-nativesdk*-linux")[0])
+
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/" + nativesdkdir)
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/" + nativesdkdir)
+ self.maxDiff = None
+ self.assertEqual(files1, files2)
+
+ @OETestID(1369)
+ def test_sstate_sametune_samesigs(self):
+ """
+ The sstate checksums of two identical machines (using the same tune) should be the
+ same, apart from changes within the machine specific stamps directory. We use the
+ qemux86copy machine to test this. Also include multilibs in the test.
+ """
+
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash\"
+MACHINE = \"qemux86\"
+require conf/multilib.conf
+MULTILIBS = "multilib:lib32"
+DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ bitbake("world meta-toolchain -S none")
+ self.write_config("""
+TMPDIR = \"${TOPDIR}/tmp-sstatesamehash2\"
+MACHINE = \"qemux86copy\"
+require conf/multilib.conf
+MULTILIBS = "multilib:lib32"
+DEFAULTTUNE_virtclass-multilib-lib32 = "x86"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ bitbake("world meta-toolchain -S none")
+
+ def get_files(d):
+ f = []
+ for root, dirs, files in os.walk(d):
+ for name in files:
+ if "meta-environment" in root or "cross-canadian" in root:
+ continue
+ if "qemux86copy-" in root or "qemux86-" in root:
+ continue
+ if "do_build" not in name and "do_populate_sdk" not in name:
+ f.append(os.path.join(root, name))
+ return f
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps")
+ files2 = [x.replace("tmp-sstatesamehash2", "tmp-sstatesamehash") for x in files2]
+ self.maxDiff = None
+ self.assertCountEqual(files1, files2)
+
+
+ @OETestID(1498)
+ def test_sstate_noop_samesigs(self):
+ """
+        The sstate checksums of two builds should be the same even when these
+        variables are changed or these classes are inherited.
+ """
+
+ self.write_config("""
+TMPDIR = "${TOPDIR}/tmp-sstatesamehash"
+BB_NUMBER_THREADS = "${@oe.utils.cpu_count()}"
+PARALLEL_MAKE = "-j 1"
+DL_DIR = "${TOPDIR}/download1"
+TIME = "111111"
+DATE = "20161111"
+INHERIT_remove = "buildstats-summary buildhistory uninative"
+http_proxy = ""
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash")
+ self.track_for_cleanup(self.topdir + "/download1")
+ bitbake("world meta-toolchain -S none")
+ self.write_config("""
+TMPDIR = "${TOPDIR}/tmp-sstatesamehash2"
+BB_NUMBER_THREADS = "${@oe.utils.cpu_count()+1}"
+PARALLEL_MAKE = "-j 2"
+DL_DIR = "${TOPDIR}/download2"
+TIME = "222222"
+DATE = "20161212"
+# Always remove uninative as we're changing proxies
+INHERIT_remove = "uninative"
+INHERIT += "buildstats-summary buildhistory"
+http_proxy = "http://example.com/"
+""")
+ self.track_for_cleanup(self.topdir + "/tmp-sstatesamehash2")
+ self.track_for_cleanup(self.topdir + "/download2")
+ bitbake("world meta-toolchain -S none")
+
+ def get_files(d):
+ f = {}
+ for root, dirs, files in os.walk(d):
+ for name in files:
+ name, shash = name.rsplit('.', 1)
+ # Extract just the machine and recipe name
+ base = os.sep.join(root.rsplit(os.sep, 2)[-2:] + [name])
+ f[base] = shash
+ return f
+
+ def compare_sigfiles(files, files1, files2, compare=False):
+ for k in files:
+ if k in files1 and k in files2:
+ print("%s differs:" % k)
+ if compare:
+ sigdatafile1 = self.topdir + "/tmp-sstatesamehash/stamps/" + k + "." + files1[k]
+ sigdatafile2 = self.topdir + "/tmp-sstatesamehash2/stamps/" + k + "." + files2[k]
+ output = bb.siggen.compare_sigfiles(sigdatafile1, sigdatafile2)
+ if output:
+ print('\n'.join(output))
+ elif k in files1 and k not in files2:
+ print("%s in files1" % k)
+ elif k not in files1 and k in files2:
+ print("%s in files2" % k)
+ else:
+ assert "shouldn't reach here"
+
+ files1 = get_files(self.topdir + "/tmp-sstatesamehash/stamps/")
+ files2 = get_files(self.topdir + "/tmp-sstatesamehash2/stamps/")
+ # Remove items that are identical in both sets
+ for k,v in files1.items() & files2.items():
+ del files1[k]
+ del files2[k]
+ if not files1 and not files2:
+ # No changes, so we're done
+ return
+
+ files = list(files1.keys() | files2.keys())
+        # comparing signature files is expensive, so only dump detailed
+        # diffs for the first 'max_sigfiles_to_compare' files
+ max_sigfiles_to_compare = 20
+ first, rest = files[:max_sigfiles_to_compare], files[max_sigfiles_to_compare:]
+ compare_sigfiles(first, files1, files2, compare=True)
+ compare_sigfiles(rest, files1, files2, compare=False)
+
+ self.fail("sstate hashes not identical.")
diff --git a/poky/meta/lib/oeqa/selftest/cases/tinfoil.py b/poky/meta/lib/oeqa/selftest/cases/tinfoil.py
new file mode 100644
index 000000000..f889a47b2
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/tinfoil.py
@@ -0,0 +1,231 @@
+import os
+import re
+import time
+import logging
+import bb.tinfoil
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd
+from oeqa.core.decorator.oeid import OETestID
+
+class TinfoilTests(OESelftestTestCase):
+ """ Basic tests for the tinfoil API """
+
+ @OETestID(1568)
+ def test_getvar(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(True)
+ machine = tinfoil.config_data.getVar('MACHINE')
+ if not machine:
+ self.fail('Unable to get MACHINE value - returned %s' % machine)
+
+ @OETestID(1569)
+ def test_expand(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(True)
+ expr = '${@os.getpid()}'
+ pid = tinfoil.config_data.expand(expr)
+ if not pid:
+ self.fail('Unable to expand "%s" - returned %s' % (expr, pid))
+
+ @OETestID(1570)
+ def test_getvar_bb_origenv(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(True)
+ origenv = tinfoil.config_data.getVar('BB_ORIGENV', False)
+ if not origenv:
+ self.fail('Unable to get BB_ORIGENV value - returned %s' % origenv)
+ self.assertEqual(origenv.getVar('HOME', False), os.environ['HOME'])
+
+ @OETestID(1571)
+ def test_parse_recipe(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ rd = tinfoil.parse_recipe_file(best[3])
+ self.assertEqual(testrecipe, rd.getVar('PN'))
+
+ @OETestID(1572)
+ def test_parse_recipe_copy_expand(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ rd = tinfoil.parse_recipe_file(best[3])
+ # Check we can get variable values
+ self.assertEqual(testrecipe, rd.getVar('PN'))
+ # Check that expanding a value that includes a variable reference works
+ self.assertEqual(testrecipe, rd.getVar('BPN'))
+ # Now check that changing the referenced variable's value in a copy gives that
+ # value when expanding
+ localdata = bb.data.createCopy(rd)
+ localdata.setVar('PN', 'hello')
+ self.assertEqual('hello', localdata.getVar('BPN'))
+
+ @OETestID(1573)
+ def test_parse_recipe_initial_datastore(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ testrecipe = 'mdadm'
+ best = tinfoil.find_best_provider(testrecipe)
+ if not best:
+ self.fail('Unable to find recipe providing %s' % testrecipe)
+ dcopy = bb.data.createCopy(tinfoil.config_data)
+ dcopy.setVar('MYVARIABLE', 'somevalue')
+ rd = tinfoil.parse_recipe_file(best[3], config_data=dcopy)
+ # Check we can get variable values
+ self.assertEqual('somevalue', rd.getVar('MYVARIABLE'))
+
+ @OETestID(1574)
+ def test_list_recipes(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+ # Check pkg_pn
+ checkpns = ['tar', 'automake', 'coreutils', 'm4-native', 'nativesdk-gcc']
+ pkg_pn = tinfoil.cooker.recipecaches[''].pkg_pn
+ for pn in checkpns:
+ self.assertIn(pn, pkg_pn)
+ # Check pkg_fn
+ checkfns = {'nativesdk-gcc': '^virtual:nativesdk:.*', 'coreutils': '.*/coreutils_.*.bb'}
+ for fn, pn in tinfoil.cooker.recipecaches[''].pkg_fn.items():
+ if pn in checkpns:
+ if pn in checkfns:
+ self.assertTrue(re.match(checkfns[pn], fn), 'Entry for %s: %s did not match %s' % (pn, fn, checkfns[pn]))
+ checkpns.remove(pn)
+ if checkpns:
+ self.fail('Unable to find pkg_fn entries for: %s' % ', '.join(checkpns))
+
+ @OETestID(1575)
+ def test_wait_event(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ tinfoil.set_event_mask(['bb.event.FilesMatchingFound', 'bb.command.CommandCompleted'])
+
+            # Drain the event queue first, otherwise events raised before the
+            # mask was set may still be pending
+ while tinfoil.wait_event():
+ pass
+
+ pattern = 'conf'
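+            # findFilesMatchingInDir is asynchronous: the matches arrive via
+            # a FilesMatchingFound event rather than in the command's return
+            # value, hence the event loop below.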
+ res = tinfoil.run_command('findFilesMatchingInDir', pattern, 'conf/machine')
+ self.assertTrue(res)
+
+ eventreceived = False
+ commandcomplete = False
+ start = time.time()
+            # Wait for up to 5s in total so that spurious events such as
+            # heartbeats would also be detected
+ while time.time() - start < 5:
+ event = tinfoil.wait_event(1)
+ if event:
+ if isinstance(event, bb.command.CommandCompleted):
+ commandcomplete = True
+ elif isinstance(event, bb.event.FilesMatchingFound):
+ self.assertEqual(pattern, event._pattern)
+ self.assertIn('qemuarm.conf', event._matches)
+ eventreceived = True
+ elif isinstance(event, logging.LogRecord):
+ continue
+ else:
+ self.fail('Unexpected event: %s' % event)
+
+ self.assertTrue(commandcomplete, 'Timed out waiting for CommandCompleted event from bitbake server')
+ self.assertTrue(eventreceived, 'Did not receive FilesMatchingFound event from bitbake server')
+
+ @OETestID(1576)
+ def test_setvariable_clean(self):
+ # First check that setVariable affects the datastore
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ tinfoil.run_command('setVariable', 'TESTVAR', 'specialvalue')
+ self.assertEqual(tinfoil.config_data.getVar('TESTVAR'), 'specialvalue', 'Value set using setVariable is not reflected in client-side getVar()')
+
+ # Now check that the setVariable's effects are no longer present
+ # (this may legitimately break in future if we stop reinitialising
+ # the datastore, in which case we'll have to reconsider use of
+ # setVariable entirely)
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ self.assertNotEqual(tinfoil.config_data.getVar('TESTVAR'), 'specialvalue', 'Value set using setVariable is still present!')
+
+ # Now check that setVar on the main datastore works (uses setVariable internally)
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ tinfoil.config_data.setVar('TESTVAR', 'specialvalue')
+ value = tinfoil.run_command('getVariable', 'TESTVAR')
+ self.assertEqual(value, 'specialvalue', 'Value set using config_data.setVar() is not reflected in config_data.getVar()')
+
+ @OETestID(1884)
+ def test_datastore_operations(self):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ # Test setVarFlag() / getVarFlag()
+ tinfoil.config_data.setVarFlag('TESTVAR', 'flagname', 'flagval')
+ value = tinfoil.config_data.getVarFlag('TESTVAR', 'flagname')
+ self.assertEqual(value, 'flagval', 'Value set using config_data.setVarFlag() is not reflected in config_data.getVarFlag()')
+ # Test delVarFlag()
+ tinfoil.config_data.setVarFlag('TESTVAR', 'otherflag', 'othervalue')
+ tinfoil.config_data.delVarFlag('TESTVAR', 'flagname')
+ value = tinfoil.config_data.getVarFlag('TESTVAR', 'flagname')
+ self.assertEqual(value, None, 'Varflag deleted using config_data.delVarFlag() is not reflected in config_data.getVarFlag()')
+ value = tinfoil.config_data.getVarFlag('TESTVAR', 'otherflag')
+ self.assertEqual(value, 'othervalue', 'Varflag deleted using config_data.delVarFlag() caused unrelated flag to be removed')
+ # Test delVar()
+ tinfoil.config_data.setVar('TESTVAR', 'varvalue')
+ value = tinfoil.config_data.getVar('TESTVAR')
+ self.assertEqual(value, 'varvalue', 'Value set using config_data.setVar() is not reflected in config_data.getVar()')
+ tinfoil.config_data.delVar('TESTVAR')
+ value = tinfoil.config_data.getVar('TESTVAR')
+ self.assertEqual(value, None, 'Variable deleted using config_data.delVar() appears to still have a value')
+ # Test renameVar()
+ tinfoil.config_data.setVar('TESTVAROLD', 'origvalue')
+ tinfoil.config_data.renameVar('TESTVAROLD', 'TESTVARNEW')
+ value = tinfoil.config_data.getVar('TESTVAROLD')
+ self.assertEqual(value, None, 'Variable renamed using config_data.renameVar() still seems to exist')
+ value = tinfoil.config_data.getVar('TESTVARNEW')
+ self.assertEqual(value, 'origvalue', 'Variable renamed using config_data.renameVar() does not appear with new name')
+ # Test overrides
+ tinfoil.config_data.setVar('TESTVAR', 'original')
+ tinfoil.config_data.setVar('TESTVAR_overrideone', 'one')
+ tinfoil.config_data.setVar('TESTVAR_overridetwo', 'two')
+ tinfoil.config_data.appendVar('OVERRIDES', ':overrideone')
+ value = tinfoil.config_data.getVar('TESTVAR')
+ self.assertEqual(value, 'one', 'Variable overrides not functioning correctly')
+
+ def test_variable_history(self):
+ # Basic test to ensure that variable history works when tracking=True
+ with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
+ tinfoil.prepare(config_only=False, quiet=2)
+            # Note that _tracking for any datastore we get will be False
+            # here; that is currently expected, so we cannot check for it
+ history = tinfoil.config_data.varhistory.variable('DL_DIR')
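+            # for/else: the else branch only fires if no matching 'set'
+            # entry for bitbake.conf was found in the history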
+ for entry in history:
+ if entry['file'].endswith('/bitbake.conf'):
+ if entry['op'] in ['set', 'set?']:
+ break
+ else:
+ self.fail('Did not find history entry setting DL_DIR in bitbake.conf. History: %s' % history)
+ # Check it works for recipes as well
+ testrecipe = 'zlib'
+ rd = tinfoil.parse_recipe(testrecipe)
+ history = rd.varhistory.variable('LICENSE')
+ bbfound = -1
+ recipefound = -1
+ for i, entry in enumerate(history):
+ if entry['file'].endswith('/bitbake.conf'):
+ if entry['detail'] == 'INVALID' and entry['op'] in ['set', 'set?']:
+ bbfound = i
+ elif entry['file'].endswith('.bb'):
+ if entry['op'] == 'set':
+ recipefound = i
+ if bbfound == -1:
+ self.fail('Did not find history entry setting LICENSE in bitbake.conf parsing %s recipe. History: %s' % (testrecipe, history))
+ if recipefound == -1:
+ self.fail('Did not find history entry setting LICENSE in %s recipe. History: %s' % (testrecipe, history))
+ if bbfound > recipefound:
+                self.fail('History entries setting LICENSE in the %s recipe and in bitbake.conf are in the wrong order. History: %s' % (testrecipe, history))
diff --git a/poky/meta/lib/oeqa/selftest/cases/wic.py b/poky/meta/lib/oeqa/selftest/cases/wic.py
new file mode 100644
index 000000000..b84466d9a
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/wic.py
@@ -0,0 +1,1066 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2015, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# AUTHORS
+# Ed Bartosh <ed.bartosh@linux.intel.com>
+
+"""Test cases for wic."""
+
+import os
+import sys
+import unittest
+
+from glob import glob
+from shutil import rmtree, copy
+from functools import wraps, lru_cache
+from tempfile import NamedTemporaryFile
+
+from oeqa.selftest.case import OESelftestTestCase
+from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars, runqemu
+from oeqa.core.decorator.oeid import OETestID
+
+
+@lru_cache(maxsize=32)
+def get_host_arch(recipe):
+ """A cached call to get_bb_var('HOST_ARCH', <recipe>)"""
+ return get_bb_var('HOST_ARCH', recipe)
+
+
+def only_for_arch(archs, image='core-image-minimal'):
+ """Decorator for wrapping test cases that can be run only for specific target
+ architectures. A list of compatible architectures is passed in `archs`.
+ Current architecture will be determined by parsing bitbake output for
+ `image` recipe.
+ """
+ def wrapper(func):
+ @wraps(func)
+ def wrapped_f(*args, **kwargs):
+ arch = get_host_arch(image)
+ if archs and arch not in archs:
+ raise unittest.SkipTest("Testcase arch dependency not met: %s" % arch)
+ return func(*args, **kwargs)
+ return wrapped_f
+ return wrapper
+
+
+class Wic(OESelftestTestCase):
+ """Wic test class."""
+
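+    # Class-level state shared across test methods: the directory wic writes
+    # results to, a flag so core-image-minimal is only built once per run,
+    # the cached wic-tools native sysroot and a cache of <image>.env paths.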
+ resultdir = "/var/tmp/wic.oe-selftest/"
+ image_is_ready = False
+ native_sysroot = None
+ wicenv_cache = {}
+
+ def setUpLocal(self):
+ """This code is executed before each test method."""
+ super(Wic, self).setUpLocal()
+ if not self.native_sysroot:
+ Wic.native_sysroot = get_bb_var('STAGING_DIR_NATIVE', 'wic-tools')
+
+ # Do this here instead of in setUpClass as the base setUp does some
+ # clean up which can result in the native tools built earlier in
+ # setUpClass being unavailable.
+ if not Wic.image_is_ready:
+ if get_bb_var('USE_NLS') == 'yes':
+ bitbake('wic-tools')
+ else:
+                self.skipTest('wic-tools cannot be built due to its (intltool|gettext)-native dependency while NLS is disabled')
+
+ bitbake('core-image-minimal')
+ Wic.image_is_ready = True
+
+ rmtree(self.resultdir, ignore_errors=True)
+
+ def tearDownLocal(self):
+ """Remove resultdir as it may contain images."""
+ rmtree(self.resultdir, ignore_errors=True)
+ super(Wic, self).tearDownLocal()
+
+ @OETestID(1552)
+ def test_version(self):
+ """Test wic --version"""
+ self.assertEqual(0, runCmd('wic --version').status)
+
+ @OETestID(1208)
+ def test_help(self):
+ """Test wic --help and wic -h"""
+ self.assertEqual(0, runCmd('wic --help').status)
+ self.assertEqual(0, runCmd('wic -h').status)
+
+ @OETestID(1209)
+ def test_createhelp(self):
+ """Test wic create --help"""
+ self.assertEqual(0, runCmd('wic create --help').status)
+
+ @OETestID(1210)
+ def test_listhelp(self):
+ """Test wic list --help"""
+ self.assertEqual(0, runCmd('wic list --help').status)
+
+ @OETestID(1553)
+ def test_help_create(self):
+ """Test wic help create"""
+ self.assertEqual(0, runCmd('wic help create').status)
+
+ @OETestID(1554)
+ def test_help_list(self):
+ """Test wic help list"""
+ self.assertEqual(0, runCmd('wic help list').status)
+
+ @OETestID(1215)
+ def test_help_overview(self):
+ """Test wic help overview"""
+ self.assertEqual(0, runCmd('wic help overview').status)
+
+ @OETestID(1216)
+ def test_help_plugins(self):
+ """Test wic help plugins"""
+ self.assertEqual(0, runCmd('wic help plugins').status)
+
+ @OETestID(1217)
+ def test_help_kickstart(self):
+ """Test wic help kickstart"""
+ self.assertEqual(0, runCmd('wic help kickstart').status)
+
+ @OETestID(1555)
+ def test_list_images(self):
+ """Test wic list images"""
+ self.assertEqual(0, runCmd('wic list images').status)
+
+ @OETestID(1556)
+ def test_list_source_plugins(self):
+ """Test wic list source-plugins"""
+ self.assertEqual(0, runCmd('wic list source-plugins').status)
+
+ @OETestID(1557)
+ def test_listed_images_help(self):
+ """Test wic listed images help"""
+ output = runCmd('wic list images').output
+ imagelist = [line.split()[0] for line in output.splitlines()]
+ for image in imagelist:
+ self.assertEqual(0, runCmd('wic list %s help' % image).status)
+
+ @OETestID(1213)
+ def test_unsupported_subcommand(self):
+ """Test unsupported subcommand"""
+ self.assertNotEqual(0, runCmd('wic unsupported', ignore_status=True).status)
+
+ @OETestID(1214)
+ def test_no_command(self):
+ """Test wic without command"""
+ self.assertEqual(1, runCmd('wic', ignore_status=True).status)
+
+ @OETestID(1211)
+ def test_build_image_name(self):
+ """Test wic create wictestdisk --image-name=core-image-minimal"""
+ cmd = "wic create wictestdisk --image-name=core-image-minimal -o %s" % self.resultdir
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @OETestID(1157)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_gpt_image(self):
+ """Test creation of core-image-minimal with gpt table and UUID boot"""
+ cmd = "wic create directdisk-gpt --image-name core-image-minimal -o %s" % self.resultdir
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
+
+ @OETestID(1346)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_iso_image(self):
+ """Test creation of hybrid iso image with legacy and EFI boot"""
+ config = 'INITRAMFS_IMAGE = "core-image-minimal-initramfs"\n'\
+ 'MACHINE_FEATURES_append = " efi"\n'\
+ 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ self.remove_config(config)
+ cmd = "wic create mkhybridiso --image-name core-image-minimal -o %s" % self.resultdir
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "HYBRID_ISO_IMG-*.iso")))
+
+ @OETestID(1348)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_qemux86_directdisk(self):
+ """Test creation of qemux-86-directdisk image"""
+ cmd = "wic create qemux86-directdisk -e core-image-minimal -o %s" % self.resultdir
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "qemux86-directdisk-*direct")))
+
+ @OETestID(1350)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_mkefidisk(self):
+ """Test creation of mkefidisk image"""
+ cmd = "wic create mkefidisk -e core-image-minimal -o %s" % self.resultdir
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "mkefidisk-*direct")))
+
+ @OETestID(1385)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_bootloader_config(self):
+ """Test creation of directdisk-bootloader-config image"""
+ config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ self.remove_config(config)
+ cmd = "wic create directdisk-bootloader-config -e core-image-minimal -o %s" % self.resultdir
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-bootloader-config-*direct")))
+
+ @OETestID(1560)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_systemd_bootdisk(self):
+ """Test creation of systemd-bootdisk image"""
+ config = 'MACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ self.remove_config(config)
+ cmd = "wic create systemd-bootdisk -e core-image-minimal -o %s" % self.resultdir
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct")))
+
+ @OETestID(1561)
+ def test_sdimage_bootpart(self):
+ """Test creation of sdimage-bootpart image"""
+ cmd = "wic create sdimage-bootpart -e core-image-minimal -o %s" % self.resultdir
+ kimgtype = get_bb_var('KERNEL_IMAGETYPE', 'core-image-minimal')
+ self.write_config('IMAGE_BOOT_FILES = "%s"\n' % kimgtype)
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct")))
+
+ @OETestID(1562)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_default_output_dir(self):
+ """Test default output location"""
+ for fname in glob("directdisk-*.direct"):
+ os.remove(fname)
+ config = 'DEPENDS_pn-core-image-minimal += "syslinux"\n'
+ self.append_config(config)
+ bitbake('core-image-minimal')
+ self.remove_config(config)
+ cmd = "wic create directdisk -e core-image-minimal"
+ self.assertEqual(0, runCmd(cmd).status)
+ self.assertEqual(1, len(glob("directdisk-*.direct")))
+
+ @OETestID(1212)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_build_artifacts(self):
+ """Test wic create directdisk providing all artifacts."""
+ bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'],
+ 'wic-tools')
+ bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'],
+ 'core-image-minimal'))
+ bbvars = {key.lower(): value for key, value in bb_vars.items()}
+ bbvars['resultdir'] = self.resultdir
+ status = runCmd("wic create directdisk "
+ "-b %(staging_datadir)s "
+ "-k %(deploy_dir_image)s "
+ "-n %(recipe_sysroot_native)s "
+ "-r %(image_rootfs)s "
+ "-o %(resultdir)s" % bbvars).status
+ self.assertEqual(0, status)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-*.direct")))
+
+ @OETestID(1264)
+ def test_compress_gzip(self):
+ """Test compressing an image with gzip"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name core-image-minimal "
+ "-c gzip -o %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.gz")))
+
+ @OETestID(1265)
+ def test_compress_bzip2(self):
+ """Test compressing an image with bzip2"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-c bzip2 -o %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.bz2")))
+
+ @OETestID(1266)
+ def test_compress_xz(self):
+ """Test compressing an image with xz"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--compress-with=xz -o %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct.xz")))
+
+ @OETestID(1267)
+ def test_wrong_compressor(self):
+ """Test how wic breaks if wrong compressor is provided"""
+ self.assertEqual(2, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-c wrong -o %s" % self.resultdir,
+ ignore_status=True).status)
+
+ @OETestID(1558)
+ def test_debug_short(self):
+ """Test -D option"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @OETestID(1658)
+ def test_debug_long(self):
+ """Test --debug option"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--debug -o %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @OETestID(1563)
+ def test_skip_build_check_short(self):
+ """Test -s option"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-s -o %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @OETestID(1671)
+ def test_skip_build_check_long(self):
+ """Test --skip-build-check option"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--skip-build-check "
+ "--outdir %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @OETestID(1564)
+ def test_build_rootfs_short(self):
+ """Test -f option"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-f -o %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @OETestID(1656)
+ def test_build_rootfs_long(self):
+ """Test --build-rootfs option"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "--build-rootfs "
+ "--outdir %s" % self.resultdir).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*.direct")))
+
+ @OETestID(1268)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_rootfs_indirect_recipes(self):
+ """Test usage of rootfs plugin with rootfs recipes"""
+ status = runCmd("wic create directdisk-multi-rootfs "
+ "--image-name=core-image-minimal "
+ "--rootfs rootfs1=core-image-minimal "
+ "--rootfs rootfs2=core-image-minimal "
+ "--outdir %s" % self.resultdir).status
+ self.assertEqual(0, status)
+ self.assertEqual(1, len(glob(self.resultdir + "directdisk-multi-rootfs*.direct")))
+
+ @OETestID(1269)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_rootfs_artifacts(self):
+ """Test usage of rootfs plugin with rootfs paths"""
+ bb_vars = get_bb_vars(['STAGING_DATADIR', 'RECIPE_SYSROOT_NATIVE'],
+ 'wic-tools')
+ bb_vars.update(get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_ROOTFS'],
+ 'core-image-minimal'))
+ bbvars = {key.lower(): value for key, value in bb_vars.items()}
+ bbvars['wks'] = "directdisk-multi-rootfs"
+ bbvars['resultdir'] = self.resultdir
+ status = runCmd("wic create %(wks)s "
+ "--bootimg-dir=%(staging_datadir)s "
+ "--kernel-dir=%(deploy_dir_image)s "
+ "--native-sysroot=%(recipe_sysroot_native)s "
+ "--rootfs-dir rootfs1=%(image_rootfs)s "
+ "--rootfs-dir rootfs2=%(image_rootfs)s "
+ "--outdir %(resultdir)s" % bbvars).status
+ self.assertEqual(0, status)
+ self.assertEqual(1, len(glob(self.resultdir + "%(wks)s-*.direct" % bbvars)))
+
+ @OETestID(1661)
+ def test_exclude_path(self):
+ """Test --exclude-path wks option."""
+
+ oldpath = os.environ['PATH']
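+        # Use the PATH bitbake computes for wic-tools so that the parted and
+        # debugfs calls below come from the native sysroot, not the host.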
+ os.environ['PATH'] = get_bb_var("PATH", "wic-tools")
+
+ try:
+ wks_file = 'temp.wks'
+ with open(wks_file, 'w') as wks:
+ rootfs_dir = get_bb_var('IMAGE_ROOTFS', 'core-image-minimal')
+ wks.write("""
+part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path usr
+part /usr --source rootfs --ondisk mmcblk0 --fstype=ext4 --rootfs-dir %s/usr
+part /etc --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path bin/ --rootfs-dir %s/usr"""
+ % (rootfs_dir, rootfs_dir))
+ self.assertEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir)).status)
+
+ os.remove(wks_file)
+ wicout = glob(self.resultdir + "%s-*direct" % 'temp')
+ self.assertEqual(1, len(wicout))
+
+ wicimg = wicout[0]
+
+            # verify the partition layout with parted
+ res = runCmd("parted -m %s unit b p 2>/dev/null" % wicimg)
+ self.assertEqual(0, res.status)
+
+ # parse parted output which looks like this:
+ # BYT;\n
+ # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n
+ # 1:0.00MiB:200MiB:200MiB:ext4::;\n
+ partlns = res.output.splitlines()[2:]
+
+ self.assertEqual(3, len(partlns))
+
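+            # Extract each partition into its own file with dd so that the
+            # filesystem contents can be inspected with debugfs below.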
+ for part in [1, 2, 3]:
+ part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part)
+ partln = partlns[part-1].split(":")
+ self.assertEqual(7, len(partln))
+                start = int(partln[1].rstrip("B")) // 512
+                length = int(partln[3].rstrip("B")) // 512
+ self.assertEqual(0, runCmd("dd if=%s of=%s skip=%d count=%d" %
+ (wicimg, part_file, start, length)).status)
+
+ def extract_files(debugfs_output):
+ """
+ extract file names from the output of debugfs -R 'ls -p',
+ which looks like this:
+
+ /2/040755/0/0/.//\n
+ /2/040755/0/0/..//\n
+ /11/040700/0/0/lost+found^M//\n
+ /12/040755/1002/1002/run//\n
+ /13/040755/1002/1002/sys//\n
+ /14/040755/1002/1002/bin//\n
+ /80/040755/1002/1002/var//\n
+ /92/040755/1002/1002/tmp//\n
+ """
+ # NOTE the occasional ^M in file names
+ return [line.split('/')[5].strip() for line in \
+ debugfs_output.strip().split('/\n')]
+
+ # Test partition 1, should contain the normal root directories, except
+ # /usr.
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part1"))
+ self.assertEqual(0, res.status)
+ files = extract_files(res.output)
+ self.assertIn("etc", files)
+ self.assertNotIn("usr", files)
+
+ # Partition 2, should contain common directories for /usr, not root
+ # directories.
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part2"))
+ self.assertEqual(0, res.status)
+ files = extract_files(res.output)
+ self.assertNotIn("etc", files)
+ self.assertNotIn("usr", files)
+ self.assertIn("share", files)
+
+ # Partition 3, should contain the same as partition 2, including the bin
+ # directory, but not the files inside it.
+ res = runCmd("debugfs -R 'ls -p' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part3"))
+ self.assertEqual(0, res.status)
+ files = extract_files(res.output)
+ self.assertNotIn("etc", files)
+ self.assertNotIn("usr", files)
+ self.assertIn("share", files)
+ self.assertIn("bin", files)
+ res = runCmd("debugfs -R 'ls -p bin' %s 2>/dev/null" % \
+ os.path.join(self.resultdir, "selftest_img.part3"))
+ self.assertEqual(0, res.status)
+ files = extract_files(res.output)
+ self.assertIn(".", files)
+ self.assertIn("..", files)
+ self.assertEqual(2, len(files))
+
+ for part in [1, 2, 3]:
+ part_file = os.path.join(self.resultdir, "selftest_img.part%d" % part)
+ os.remove(part_file)
+
+ finally:
+ os.environ['PATH'] = oldpath
+
+ @OETestID(1662)
+ def test_exclude_path_errors(self):
+ """Test --exclude-path wks option error handling."""
+ wks_file = 'temp.wks'
+
+ # Absolute argument.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path /usr")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
+ # Argument pointing to parent directory.
+ with open(wks_file, 'w') as wks:
+ wks.write("part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --exclude-path ././..")
+ self.assertNotEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wks_file, self.resultdir), ignore_status=True).status)
+ os.remove(wks_file)
+
+ @OETestID(1496)
+ def test_bmap_short(self):
+ """Test generation of .bmap file -m option"""
+ cmd = "wic create wictestdisk -e core-image-minimal -m -o %s" % self.resultdir
+ status = runCmd(cmd).status
+ self.assertEqual(0, status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap")))
+
+ @OETestID(1655)
+ def test_bmap_long(self):
+ """Test generation of .bmap file --bmap option"""
+ cmd = "wic create wictestdisk -e core-image-minimal --bmap -o %s" % self.resultdir
+ status = runCmd(cmd).status
+ self.assertEqual(0, status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct.bmap")))
+
+ def _get_image_env_path(self, image):
+ """Generate and obtain the path to <image>.env"""
+ if image not in self.wicenv_cache:
+ self.assertEqual(0, bitbake('%s -c do_rootfs_wicenv' % image).status)
+ bb_vars = get_bb_vars(['STAGING_DIR', 'MACHINE'], image)
+ stdir = bb_vars['STAGING_DIR']
+ machine = bb_vars['MACHINE']
+ self.wicenv_cache[image] = os.path.join(stdir, machine, 'imgdata')
+ return self.wicenv_cache[image]
+
+ @OETestID(1347)
+ def test_image_env(self):
+ """Test generation of <image>.env files."""
+ image = 'core-image-minimal'
+ imgdatadir = self._get_image_env_path(image)
+
+ bb_vars = get_bb_vars(['IMAGE_BASENAME', 'WICVARS'], image)
+ basename = bb_vars['IMAGE_BASENAME']
+ self.assertEqual(basename, image)
+ path = os.path.join(imgdatadir, basename) + '.env'
+ self.assertTrue(os.path.isfile(path))
+
+ wicvars = set(bb_vars['WICVARS'].split())
+ # filter out optional variables
+ wicvars = wicvars.difference(('DEPLOY_DIR_IMAGE', 'IMAGE_BOOT_FILES',
+ 'INITRD', 'INITRD_LIVE', 'ISODIR'))
+ with open(path) as envfile:
+ content = dict(line.split("=", 1) for line in envfile)
+            # check that the variables used by wic are present in the .env file
+ for var in wicvars:
+ self.assertTrue(var in content, "%s is not in .env file" % var)
+ self.assertTrue(content[var])
+
+ @OETestID(1559)
+ def test_image_vars_dir_short(self):
+ """Test image vars directory selection -v option"""
+ image = 'core-image-minimal'
+ imgenvdir = self._get_image_env_path(image)
+ native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=%s -v %s -n %s -o %s"
+ % (image, imgenvdir, native_sysroot,
+ self.resultdir)).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+
+ @OETestID(1665)
+ def test_image_vars_dir_long(self):
+ """Test image vars directory selection --vars option"""
+ image = 'core-image-minimal'
+ imgenvdir = self._get_image_env_path(image)
+ native_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=%s "
+ "--vars %s "
+ "--native-sysroot %s "
+ "--outdir %s"
+ % (image, imgenvdir, native_sysroot,
+ self.resultdir)).status)
+ self.assertEqual(1, len(glob(self.resultdir + "wictestdisk-*direct")))
+
+ @OETestID(1351)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_wic_image_type(self):
+ """Test building wic images by bitbake"""
+ config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\
+ 'MACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('wic-image-minimal').status)
+ self.remove_config(config)
+
+ bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE'])
+ deploy_dir = bb_vars['DEPLOY_DIR_IMAGE']
+ machine = bb_vars['MACHINE']
+ prefix = os.path.join(deploy_dir, 'wic-image-minimal-%s.' % machine)
+ # check if we have result image and manifests symlinks
+ # pointing to existing files
+ for suffix in ('wic', 'manifest'):
+ path = prefix + suffix
+ self.assertTrue(os.path.islink(path))
+ self.assertTrue(os.path.isfile(os.path.realpath(path)))
+
+ @OETestID(1424)
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ def test_qemu(self):
+ """Test wic-image-minimal under qemu"""
+ config = 'IMAGE_FSTYPES += "wic"\nWKS_FILE = "wic-image-minimal"\n'\
+ 'MACHINE_FEATURES_append = " efi"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('wic-image-minimal').status)
+ self.remove_config(config)
+
+ with runqemu('wic-image-minimal', ssh=False) as qemu:
+ cmd = "mount |grep '^/dev/' | cut -f1,3 -d ' ' | sort"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(output, '/dev/root /\r\n/dev/sda1 /boot\r\n/dev/sda3 /media\r\n/dev/sda4 /mnt')
+ cmd = "grep UUID= /etc/fstab"
+ status, output = qemu.run_serial(cmd)
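+            # run_serial() returns a status of 1 when the command succeeded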
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, 'UUID=2c71ef06-a81d-4735-9d3a-379b69c6bdba\t/media\text4\tdefaults\t0\t0')
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ @OETestID(1852)
+ def test_qemu_efi(self):
+ """Test core-image-minimal efi image under qemu"""
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "mkefidisk.wks"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal ovmf').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal', ssh=False,
+ runqemuparams='ovmf', image_fstype='wic') as qemu:
+ cmd = "grep sda. /proc/partitions |wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '3')
+
+ @staticmethod
+ def _make_fixed_size_wks(size):
+ """
+ Create a wks of an image with a single partition. Size of the partition is set
+ using --fixed-size flag. Returns a tuple: (path to wks file, wks image name)
+ """
+ with NamedTemporaryFile("w", suffix=".wks", delete=False) as tempf:
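+            # delete=False: the .wks file must outlive this context manager;
+            # the caller removes it once 'wic create' has consumed it.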
+ wkspath = tempf.name
+ tempf.write("part " \
+ "--source rootfs --ondisk hda --align 4 --fixed-size %d "
+ "--fstype=ext4\n" % size)
+ wksname = os.path.splitext(os.path.basename(wkspath))[0]
+
+ return wkspath, wksname
+
+ @OETestID(1847)
+ def test_fixed_size(self):
+ """
+ Test creation of a simple image with partition size controlled through
+ --fixed-size flag
+ """
+ wkspath, wksname = Wic._make_fixed_size_wks(200)
+
+ self.assertEqual(0, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wkspath, self.resultdir)).status)
+ os.remove(wkspath)
+ wicout = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(wicout))
+
+ wicimg = wicout[0]
+
+        # verify the partition size with parted
+ res = runCmd("parted -m %s unit mib p 2>/dev/null" % wicimg,
+ ignore_status=True,
+ native_sysroot=self.native_sysroot)
+ self.assertEqual(0, res.status)
+
+ # parse parted output which looks like this:
+ # BYT;\n
+ # /var/tmp/wic/build/tmpfwvjjkf_-201611101222-hda.direct:200MiB:file:512:512:msdos::;\n
+ # 1:0.00MiB:200MiB:200MiB:ext4::;\n
+ partlns = res.output.splitlines()[2:]
+
+ self.assertEqual(1, len(partlns))
+ self.assertEqual("1:0.00MiB:200MiB:200MiB:ext4::;", partlns[0])
+
+ @OETestID(1848)
+ def test_fixed_size_error(self):
+ """
+ Test creation of a simple image with partition size controlled through
+ --fixed-size flag. The size of partition is intentionally set to 1MiB
+ in order to trigger an error in wic.
+ """
+ wkspath, wksname = Wic._make_fixed_size_wks(1)
+
+ self.assertEqual(1, runCmd("wic create %s -e core-image-minimal -o %s" \
+ % (wkspath, self.resultdir), ignore_status=True).status)
+ os.remove(wkspath)
+ wicout = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(0, len(wicout))
+
+ @only_for_arch(['i586', 'i686', 'x86_64'])
+ @OETestID(1854)
+ def test_rawcopy_plugin_qemu(self):
+ """Test rawcopy plugin in qemu"""
+ # build ext4 and wic images
+ for fstype in ("ext4", "wic"):
+ config = 'IMAGE_FSTYPES = "%s"\nWKS_FILE = "test_rawcopy_plugin.wks.in"\n' % fstype
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+ self.remove_config(config)
+
+ with runqemu('core-image-minimal', ssh=False, image_fstype='wic') as qemu:
+ cmd = "grep sda. /proc/partitions |wc -l"
+ status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ self.assertEqual(output, '2')
+
+ @OETestID(1853)
+ def test_rawcopy_plugin(self):
+ """Test rawcopy plugin"""
+ img = 'core-image-minimal'
+ machine = get_bb_var('MACHINE', img)
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(['part /boot --active --source bootimg-pcbios\n',
+ 'part / --source rawcopy --sourceparams="file=%s-%s.ext4" --use-uuid\n'\
+ % (img, machine),
+ 'bootloader --timeout=0 --append="console=ttyS0,115200n8"\n'])
+ wks.flush()
+ cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
+ self.assertEqual(0, runCmd(cmd).status)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ @OETestID(1849)
+ def test_fs_types(self):
+ """Test filesystem types for empty and not empty partitions"""
+ img = 'core-image-minimal'
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(['part ext2 --fstype ext2 --source rootfs\n',
+ 'part btrfs --fstype btrfs --source rootfs --size 40M\n',
+ 'part squash --fstype squashfs --source rootfs\n',
+ 'part swap --fstype swap --size 1M\n',
+ 'part emptyvfat --fstype vfat --size 1M\n',
+ 'part emptymsdos --fstype msdos --size 1M\n',
+ 'part emptyext2 --fstype ext2 --size 1M\n',
+ 'part emptybtrfs --fstype btrfs --size 100M\n'])
+ wks.flush()
+ cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
+ self.assertEqual(0, runCmd(cmd).status)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ @OETestID(1851)
+ def test_kickstart_parser(self):
+ """Test wks parser options"""
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(['part / --fstype ext3 --source rootfs --system-id 0xFF '\
+ '--overhead-factor 1.2 --size 100k\n'])
+ wks.flush()
+ cmd = "wic create %s -e core-image-minimal -o %s" % (wks.name, self.resultdir)
+ self.assertEqual(0, runCmd(cmd).status)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ @OETestID(1850)
+ def test_image_bootpart_globbed(self):
+ """Test globbed sources with image-bootpart plugin"""
+ img = "core-image-minimal"
+ cmd = "wic create sdimage-bootpart -e %s -o %s" % (img, self.resultdir)
+ config = 'IMAGE_BOOT_FILES = "%s*"' % get_bb_var('KERNEL_IMAGETYPE', img)
+ self.append_config(config)
+ self.assertEqual(0, runCmd(cmd).status)
+ self.remove_config(config)
+ self.assertEqual(1, len(glob(self.resultdir + "sdimage-bootpart-*direct")))
+
+ @OETestID(1855)
+ def test_sparse_copy(self):
+ """Test sparse_copy with FIEMAP and SEEK_HOLE filemap APIs"""
+ libpath = os.path.join(get_bb_var('COREBASE'), 'scripts', 'lib', 'wic')
+ sys.path.insert(0, libpath)
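+        # filemap is not an installed package, so import it directly from
+        # scripts/lib/wic, which was just prepended to sys.path.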
+ from filemap import FilemapFiemap, FilemapSeek, sparse_copy, ErrorNotSupp
+ with NamedTemporaryFile("w", suffix=".wic-sparse") as sparse:
+ src_name = sparse.name
+ src_size = 1024 * 10
+ sparse.truncate(src_size)
+ # write one byte to the file
+ with open(src_name, 'r+b') as sfile:
+ sfile.seek(1024 * 4)
+ sfile.write(b'\x00')
+ dest = sparse.name + '.out'
+ # copy src file to dest using different filemap APIs
+ for api in (FilemapFiemap, FilemapSeek, None):
+ if os.path.exists(dest):
+ os.unlink(dest)
+ try:
+ sparse_copy(sparse.name, dest, api=api)
+ except ErrorNotSupp:
+ continue # skip unsupported API
+ dest_stat = os.stat(dest)
+ self.assertEqual(dest_stat.st_size, src_size)
+                # st_blocks counts 512-byte units, so 8 blocks == 4KiB,
+                # i.e. a single 4K filesystem block was allocated
+ self.assertEqual(dest_stat.st_blocks, 8)
+ os.unlink(dest)
+
+ @OETestID(1857)
+ def test_wic_ls(self):
+ """Test listing image content using 'wic ls'"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir).status)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list partitions
+ result = runCmd("wic ls %s -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertEqual(3, len(result.output.split('\n')))
+
+ # list directory content of the first partition
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertEqual(6, len(result.output.split('\n')))
+
+ @OETestID(1856)
+ def test_wic_cp(self):
+ """Test copy files and directories to the the wic image."""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir).status)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the first partition
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertEqual(6, len(result.output.split('\n')))
+
+ with NamedTemporaryFile("w", suffix=".wic-cp") as testfile:
+            testfile.write("test")
+            testfile.flush()  # ensure the content is on disk before wic cp reads it
+
+ # copy file to the partition
+ result = runCmd("wic cp %s %s:1/ -n %s" % (testfile.name, images[0], sysroot))
+ self.assertEqual(0, result.status)
+
+ # check if file is there
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertEqual(7, len(result.output.split('\n')))
+ self.assertTrue(os.path.basename(testfile.name) in result.output)
+
+ # prepare directory
+ testdir = os.path.join(self.resultdir, 'wic-test-cp-dir')
+ testsubdir = os.path.join(testdir, 'subdir')
+            os.makedirs(testsubdir)
+ copy(testfile.name, testdir)
+
+ # copy directory to the partition
+ result = runCmd("wic cp %s %s:1/ -n %s" % (testdir, images[0], sysroot))
+ self.assertEqual(0, result.status)
+
+ # check if directory is there
+ result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertEqual(8, len(result.output.split('\n')))
+ self.assertTrue(os.path.basename(testdir) in result.output)
+
+ @OETestID(1858)
+ def test_wic_rm(self):
+ """Test removing files and directories from the the wic image."""
+ self.assertEqual(0, runCmd("wic create mkefidisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir).status)
+ images = glob(self.resultdir + "mkefidisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the first partition
+ result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertIn('\nBZIMAGE ', result.output)
+ self.assertIn('\nEFI <DIR> ', result.output)
+
+ # remove file
+ result = runCmd("wic rm %s:1/bzimage -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+
+ # remove directory
+ result = runCmd("wic rm %s:1/efi -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+
+ # check if they're removed
+ result = runCmd("wic ls %s:1 -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertNotIn('\nBZIMAGE ', result.output)
+ self.assertNotIn('\nEFI <DIR> ', result.output)
+
+ @OETestID(1922)
+ def test_mkfs_extraopts(self):
+ """Test wks option --mkfs-extraopts for empty and not empty partitions"""
+ img = 'core-image-minimal'
+ with NamedTemporaryFile("w", suffix=".wks") as wks:
+ wks.writelines(
+ ['part ext2 --fstype ext2 --source rootfs --mkfs-extraopts "-D -F -i 8192"\n',
+ "part btrfs --fstype btrfs --source rootfs --size 40M --mkfs-extraopts='--quiet'\n",
+ 'part squash --fstype squashfs --source rootfs --mkfs-extraopts "-no-sparse -b 4096"\n',
+ 'part emptyvfat --fstype vfat --size 1M --mkfs-extraopts "-S 1024 -s 64"\n',
+ 'part emptymsdos --fstype msdos --size 1M --mkfs-extraopts "-S 1024 -s 64"\n',
+ 'part emptyext2 --fstype ext2 --size 1M --mkfs-extraopts "-D -F -i 8192"\n',
+ 'part emptybtrfs --fstype btrfs --size 100M --mkfs-extraopts "--mixed -K"\n'])
+ wks.flush()
+ cmd = "wic create %s -e %s -o %s" % (wks.name, img, self.resultdir)
+ self.assertEqual(0, runCmd(cmd).status)
+ wksname = os.path.splitext(os.path.basename(wks.name))[0]
+ out = glob(self.resultdir + "%s-*direct" % wksname)
+ self.assertEqual(1, len(out))
+
+ def test_expand_mbr_image(self):
+ """Test wic write --expand command for mbr image"""
+ # build an image
+ config = 'IMAGE_FSTYPES = "wic"\nWKS_FILE = "directdisk.wks"\n'
+ self.append_config(config)
+ self.assertEqual(0, bitbake('core-image-minimal').status)
+
+ # get path to the image
+ bb_vars = get_bb_vars(['DEPLOY_DIR_IMAGE', 'MACHINE'])
+ deploy_dir = bb_vars['DEPLOY_DIR_IMAGE']
+ machine = bb_vars['MACHINE']
+ image_path = os.path.join(deploy_dir, 'core-image-minimal-%s.wic' % machine)
+
+ self.remove_config(config)
+
+ try:
+ # expand image to 1G
+ new_image_path = None
+ with NamedTemporaryFile(mode='wb', suffix='.wic.exp',
+ dir=deploy_dir, delete=False) as sparse:
+ sparse.truncate(1024 ** 3)
+ new_image_path = sparse.name
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+ cmd = "wic write -n %s --expand 1:0 %s %s" % (sysroot, image_path, new_image_path)
+ self.assertEqual(0, runCmd(cmd).status)
+
+ # check if partitions are expanded
+ orig = runCmd("wic ls %s -n %s" % (image_path, sysroot))
+ exp = runCmd("wic ls %s -n %s" % (new_image_path, sysroot))
+ orig_sizes = [int(line.split()[3]) for line in orig.output.split('\n')[1:]]
+ exp_sizes = [int(line.split()[3]) for line in exp.output.split('\n')[1:]]
+ self.assertEqual(orig_sizes[0], exp_sizes[0]) # first partition is not resized
+ self.assertTrue(orig_sizes[1] < exp_sizes[1])
+
+ # Check if all free space is partitioned
+ result = runCmd("%s/usr/sbin/sfdisk -F %s" % (sysroot, new_image_path))
+ self.assertTrue("0 B, 0 bytes, 0 sectors" in result.output)
+
+ os.rename(image_path, image_path + '.bak')
+ os.rename(new_image_path, image_path)
+
+ # Check if it boots in qemu
+ with runqemu('core-image-minimal', ssh=False) as qemu:
+ cmd = "ls /etc/"
+                status, output = qemu.run_serial(cmd)
+ self.assertEqual(1, status, 'Failed to run command "%s": %s' % (cmd, output))
+ finally:
+ if os.path.exists(new_image_path):
+ os.unlink(new_image_path)
+ if os.path.exists(image_path + '.bak'):
+ os.rename(image_path + '.bak', image_path)
+
+ def test_wic_ls_ext(self):
+ """Test listing content of the ext partition using 'wic ls'"""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir).status)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the second ext4 partition
+ result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(
+ set(line.split()[-1] for line in result.output.split('\n') if line)))
+
+ def test_wic_cp_ext(self):
+ """Test copy files and directories to the ext partition."""
+ self.assertEqual(0, runCmd("wic create wictestdisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir).status)
+ images = glob(self.resultdir + "wictestdisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the ext4 partition
+ result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ dirs = set(line.split()[-1] for line in result.output.split('\n') if line)
+ self.assertTrue(set(['bin', 'home', 'proc', 'usr', 'var', 'dev', 'lib', 'sbin']).issubset(dirs))
+
+ with NamedTemporaryFile("w", suffix=".wic-cp") as testfile:
+            testfile.write("test")
+            testfile.flush()  # ensure the content is on disk before wic cp reads it
+
+ # copy file to the partition
+ result = runCmd("wic cp %s %s:2/ -n %s" % (testfile.name, images[0], sysroot))
+ self.assertEqual(0, result.status)
+
+ # check if file is there
+ result = runCmd("wic ls %s:2/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ newdirs = set(line.split()[-1] for line in result.output.split('\n') if line)
+ self.assertEqual(newdirs.difference(dirs), set([os.path.basename(testfile.name)]))
+
+ def test_wic_rm_ext(self):
+ """Test removing files from the ext partition."""
+ self.assertEqual(0, runCmd("wic create mkefidisk "
+ "--image-name=core-image-minimal "
+ "-D -o %s" % self.resultdir).status)
+ images = glob(self.resultdir + "mkefidisk-*.direct")
+ self.assertEqual(1, len(images))
+
+ sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools')
+
+ # list directory content of the /etc directory on ext4 partition
+ result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertTrue('fstab' in [line.split()[-1] for line in result.output.split('\n') if line])
+
+ # remove file
+ result = runCmd("wic rm %s:2/etc/fstab -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+
+ # check if it's removed
+ result = runCmd("wic ls %s:2/etc/ -n %s" % (images[0], sysroot))
+ self.assertEqual(0, result.status)
+ self.assertTrue('fstab' not in [line.split()[-1] for line in result.output.split('\n') if line])
diff --git a/poky/meta/lib/oeqa/selftest/context.py b/poky/meta/lib/oeqa/selftest/context.py
new file mode 100644
index 000000000..9e90d3c25
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/context.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import time
+import glob
+import sys
+import imp
+import signal
+from shutil import copyfile
+from random import choice
+
+import oeqa
+
+from oeqa.core.context import OETestContext, OETestContextExecutor
+from oeqa.core.exception import OEQAPreRun, OEQATestNotFound
+
+from oeqa.utils.commands import runCmd, get_bb_vars, get_test_layer
+
+class OESelftestTestContext(OETestContext):
+ def __init__(self, td=None, logger=None, machines=None, config_paths=None):
+ super(OESelftestTestContext, self).__init__(td, logger)
+
+ self.machines = machines
+ self.custommachine = None
+ self.config_paths = config_paths
+
+ def runTests(self, machine=None, skips=[]):
+ if machine:
+ self.custommachine = machine
+ if machine == 'random':
+ self.custommachine = choice(self.machines)
+ self.logger.info('Run tests with custom MACHINE set to: %s' % \
+ self.custommachine)
+ return super(OESelftestTestContext, self).runTests(skips)
+
+ def listTests(self, display_type, machine=None):
+ return super(OESelftestTestContext, self).listTests(display_type)
+
+class OESelftestTestContextExecutor(OETestContextExecutor):
+ _context_class = OESelftestTestContext
+ _script_executor = 'oe-selftest'
+
+ name = 'oe-selftest'
+ help = 'oe-selftest test component'
+ description = 'Executes selftest tests'
+
+ def register_commands(self, logger, parser):
+ group = parser.add_mutually_exclusive_group(required=True)
+
+ group.add_argument('-a', '--run-all-tests', default=False,
+ action="store_true", dest="run_all_tests",
+ help='Run all (unhidden) tests')
+ group.add_argument('-R', '--skip-tests', required=False, action='store',
+ nargs='+', dest="skips", default=None,
+ help='Run all (unhidden) tests except the ones specified. Format should be <module>[.<class>[.<test_method>]]')
+ group.add_argument('-r', '--run-tests', required=False, action='store',
+ nargs='+', dest="run_tests", default=None,
+ help='Select what tests to run (modules, classes or test methods). Format should be: <module>.<class>.<test_method>')
+
+ group.add_argument('-m', '--list-modules', required=False,
+ action="store_true", default=False,
+ help='List all available test modules.')
+ group.add_argument('--list-classes', required=False,
+ action="store_true", default=False,
+ help='List all available test classes.')
+ group.add_argument('-l', '--list-tests', required=False,
+ action="store_true", default=False,
+ help='List all available tests.')
+
+ parser.add_argument('--machine', required=False, choices=['random', 'all'],
+ help='Run tests on different machines (random/all).')
+
+ parser.set_defaults(func=self.run)
+
+ def _get_available_machines(self):
+ machines = []
+
+ bbpath = self.tc_kwargs['init']['td']['BBPATH'].split(':')
+
+ for path in bbpath:
+ found_machines = glob.glob(os.path.join(path, 'conf', 'machine', '*.conf'))
+ if found_machines:
+ for i in found_machines:
+ # eg: '/home/<user>/poky/meta-intel/conf/machine/intel-core2-32.conf'
+ machines.append(os.path.splitext(os.path.basename(i))[0])
+
+ return machines
+
+ def _get_cases_paths(self, bbpath):
+ cases_paths = []
+ for layer in bbpath:
+ cases_dir = os.path.join(layer, 'lib', 'oeqa', 'selftest', 'cases')
+ if os.path.isdir(cases_dir):
+ cases_paths.append(cases_dir)
+ return cases_paths
+
+ def _process_args(self, logger, args):
+ args.output_log = '%s-results-%s.log' % (self.name,
+ time.strftime("%Y%m%d%H%M%S"))
+ args.test_data_file = None
+ args.CASES_PATHS = None
+
+ super(OESelftestTestContextExecutor, self)._process_args(logger, args)
+
+ if args.list_modules:
+ args.list_tests = 'module'
+ elif args.list_classes:
+ args.list_tests = 'class'
+ elif args.list_tests:
+ args.list_tests = 'name'
+
+ self.tc_kwargs['init']['td'] = get_bb_vars()
+ self.tc_kwargs['init']['machines'] = self._get_available_machines()
+
+ builddir = os.environ.get("BUILDDIR")
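+        # Record the build configuration files along with two levels of
+        # backups: *.orig copies are taken once here, while the *.bk
+        # ("class backup") copies are managed around each test class.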
+ self.tc_kwargs['init']['config_paths'] = {}
+ self.tc_kwargs['init']['config_paths']['testlayer_path'] = \
+ get_test_layer()
+ self.tc_kwargs['init']['config_paths']['builddir'] = builddir
+ self.tc_kwargs['init']['config_paths']['localconf'] = \
+ os.path.join(builddir, "conf/local.conf")
+ self.tc_kwargs['init']['config_paths']['localconf_backup'] = \
+ os.path.join(builddir, "conf/local.conf.orig")
+ self.tc_kwargs['init']['config_paths']['localconf_class_backup'] = \
+ os.path.join(builddir, "conf/local.conf.bk")
+ self.tc_kwargs['init']['config_paths']['bblayers'] = \
+ os.path.join(builddir, "conf/bblayers.conf")
+ self.tc_kwargs['init']['config_paths']['bblayers_backup'] = \
+ os.path.join(builddir, "conf/bblayers.conf.orig")
+ self.tc_kwargs['init']['config_paths']['bblayers_class_backup'] = \
+ os.path.join(builddir, "conf/bblayers.conf.bk")
+
+ copyfile(self.tc_kwargs['init']['config_paths']['localconf'],
+ self.tc_kwargs['init']['config_paths']['localconf_backup'])
+ copyfile(self.tc_kwargs['init']['config_paths']['bblayers'],
+ self.tc_kwargs['init']['config_paths']['bblayers_backup'])
+
+ self.tc_kwargs['run']['skips'] = args.skips
+
+ def _pre_run(self):
+ def _check_required_env_variables(vars):
+ for var in vars:
+ if not os.environ.get(var):
+ self.tc.logger.error("%s is not set. Did you forget to source your build environment setup script?" % var)
+ raise OEQAPreRun
+
+ def _check_presence_meta_selftest():
+ builddir = os.environ.get("BUILDDIR")
+ if os.getcwd() != builddir:
+ self.tc.logger.info("Changing cwd to %s" % builddir)
+ os.chdir(builddir)
+
+ if not "meta-selftest" in self.tc.td["BBLAYERS"]:
+ self.tc.logger.warn("meta-selftest layer not found in BBLAYERS, adding it")
+ meta_selftestdir = os.path.join(
+ self.tc.td["BBLAYERS_FETCH_DIR"], 'meta-selftest')
+ if os.path.isdir(meta_selftestdir):
+ runCmd("bitbake-layers add-layer %s" %meta_selftestdir)
+ # reload data is needed because a meta-selftest layer was add
+ self.tc.td = get_bb_vars()
+ self.tc.config_paths['testlayer_path'] = get_test_layer()
+ else:
+ self.tc.logger.error("could not locate meta-selftest in:\n%s" % meta_selftestdir)
+ raise OEQAPreRun
+
+ def _add_layer_libs():
+ bbpath = self.tc.td['BBPATH'].split(':')
+ layer_libdirs = [p for p in (os.path.join(l, 'lib') \
+ for l in bbpath) if os.path.exists(p)]
+ if layer_libdirs:
+ self.tc.logger.info("Adding layer libraries:")
+ for l in layer_libdirs:
+ self.tc.logger.info("\t%s" % l)
+
+ sys.path.extend(layer_libdirs)
+ imp.reload(oeqa.selftest)
+
+ _check_required_env_variables(["BUILDDIR"])
+ _check_presence_meta_selftest()
+
+ if "buildhistory.bbclass" in self.tc.td["BBINCLUDED"]:
+ self.tc.logger.error("You have buildhistory enabled already and this isn't recommended for selftest, please disable it first.")
+ raise OEQAPreRun
+
+ if "PRSERV_HOST" in self.tc.td:
+ self.tc.logger.error("Please unset PRSERV_HOST in order to run oe-selftest")
+ raise OEQAPreRun
+
+ if "SANITY_TESTED_DISTROS" in self.tc.td:
+ self.tc.logger.error("Please unset SANITY_TESTED_DISTROS in order to run oe-selftest")
+ raise OEQAPreRun
+
+ _add_layer_libs()
+
+ self.tc.logger.info("Running bitbake -p")
+ runCmd("bitbake -p")
+
+ def _internal_run(self, logger, args):
+ self.module_paths = self._get_cases_paths(
+ self.tc_kwargs['init']['td']['BBPATH'].split(':'))
+
+ self.tc = self._context_class(**self.tc_kwargs['init'])
+ try:
+ self.tc.loadTests(self.module_paths, **self.tc_kwargs['load'])
+ except OEQATestNotFound as ex:
+ logger.error(ex)
+ sys.exit(1)
+
+ if args.list_tests:
+ rc = self.tc.listTests(args.list_tests, **self.tc_kwargs['list'])
+ else:
+ self._pre_run()
+ rc = self.tc.runTests(**self.tc_kwargs['run'])
+ rc.logDetails()
+ rc.logSummary(self.name)
+
+ return rc
+
+ def _signal_clean_handler(self, signum, frame):
+ sys.exit(1)
+
+ def run(self, logger, args):
+ self._process_args(logger, args)
+
+ signal.signal(signal.SIGTERM, self._signal_clean_handler)
+
+ rc = None
+ try:
+ if args.machine:
+ logger.info('Custom machine mode enabled. MACHINE set to %s' %
+ args.machine)
+
+ if args.machine == 'all':
+ results = []
+ for m in self.tc_kwargs['init']['machines']:
+ self.tc_kwargs['run']['machine'] = m
+ results.append(self._internal_run(logger, args))
+
+ # XXX: the oe-selftest script only needs to know if one
+ # machine run fails
+ for r in results:
+ rc = r
+ if not r.wasSuccessful():
+ break
+
+ else:
+ self.tc_kwargs['run']['machine'] = args.machine
+ return self._internal_run(logger, args)
+
+ else:
+ self.tc_kwargs['run']['machine'] = args.machine
+ rc = self._internal_run(logger, args)
+ finally:
+ config_paths = self.tc_kwargs['init']['config_paths']
+ if os.path.exists(config_paths['localconf_backup']):
+ copyfile(config_paths['localconf_backup'],
+ config_paths['localconf'])
+ os.remove(config_paths['localconf_backup'])
+
+ if os.path.exists(config_paths['bblayers_backup']):
+ copyfile(config_paths['bblayers_backup'],
+ config_paths['bblayers'])
+ os.remove(config_paths['bblayers_backup'])
+
+ if os.path.exists(config_paths['localconf_class_backup']):
+ os.remove(config_paths['localconf_class_backup'])
+ if os.path.exists(config_paths['bblayers_class_backup']):
+ os.remove(config_paths['bblayers_class_backup'])
+
+ output_link = os.path.join(os.path.dirname(args.output_log),
+ "%s-results.log" % self.name)
+ if os.path.exists(output_link):
+ os.remove(output_link)
+ os.symlink(args.output_log, output_link)
+
+ return rc
+
+_executor_class = OESelftestTestContextExecutor
diff --git a/poky/meta/lib/oeqa/targetcontrol.py b/poky/meta/lib/oeqa/targetcontrol.py
new file mode 100644
index 000000000..59a9c35a0
--- /dev/null
+++ b/poky/meta/lib/oeqa/targetcontrol.py
@@ -0,0 +1,232 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# This module is used by testimage.bbclass for setting up and controlling a target machine.
+
+import os
+import shutil
+import subprocess
+import bb
+import traceback
+import sys
+import logging
+from oeqa.utils.sshcontrol import SSHControl
+from oeqa.utils.qemurunner import QemuRunner
+from oeqa.utils.qemutinyrunner import QemuTinyRunner
+from oeqa.utils.dump import TargetDumper
+from oeqa.controllers.testtargetloader import TestTargetLoader
+from abc import ABCMeta, abstractmethod
+
+class BaseTarget(object, metaclass=ABCMeta):
+
+ supported_image_fstypes = []
+
+ def __init__(self, d, logger):
+ self.connection = None
+ self.ip = None
+ self.server_ip = None
+ self.datetime = d.getVar('DATETIME')
+ self.testdir = d.getVar("TEST_LOG_DIR")
+ self.pn = d.getVar("PN")
+ self.logger = logger
+
+ @abstractmethod
+ def deploy(self):
+
+ self.sshlog = os.path.join(self.testdir, "ssh_target_log.%s" % self.datetime)
+ sshloglink = os.path.join(self.testdir, "ssh_target_log")
+ if os.path.islink(sshloglink):
+ os.unlink(sshloglink)
+ os.symlink(self.sshlog, sshloglink)
+ self.logger.info("SSH log file: %s" % self.sshlog)
+
+ @abstractmethod
+ def start(self, params=None, ssh=True, extra_bootparams=None):
+ pass
+
+ @abstractmethod
+ def stop(self):
+ pass
+
+ @classmethod
+ def get_extra_files(cls):
+ return None
+
+ @classmethod
+ def match_image_fstype(cls, d, image_fstypes=None):
+ if not image_fstypes:
+ image_fstypes = d.getVar('IMAGE_FSTYPES').split(' ')
+ possible_image_fstypes = [fstype for fstype in cls.supported_image_fstypes if fstype in image_fstypes]
+ if possible_image_fstypes:
+ return possible_image_fstypes[0]
+ else:
+ return None
+
+ def get_image_fstype(self, d):
+ image_fstype = self.match_image_fstype(d)
+ if image_fstype:
+ return image_fstype
+ else:
+ bb.fatal("IMAGE_FSTYPES should contain a Target Controller supported image fstype: %s " % ', '.join(map(str, self.supported_image_fstypes)))
+
+ def restart(self, params=None):
+ self.stop()
+ self.start(params)
+
+ def run(self, cmd, timeout=None):
+ return self.connection.run(cmd, timeout)
+
+ def copy_to(self, localpath, remotepath):
+ return self.connection.copy_to(localpath, remotepath)
+
+ def copy_from(self, remotepath, localpath):
+ return self.connection.copy_from(remotepath, localpath)
+
+
+class QemuTarget(BaseTarget):
+
+ supported_image_fstypes = ['ext3', 'ext4', 'cpio.gz', 'wic']
+
+ def __init__(self, d, logger, image_fstype=None):
+
+ import oe.types
+
+ super(QemuTarget, self).__init__(d, logger)
+
+ self.rootfs = ''
+ self.kernel = ''
+ self.image_fstype = ''
+
+ if d.getVar('FIND_ROOTFS') == '1':
+ self.image_fstype = image_fstype or self.get_image_fstype(d)
+ self.rootfs = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("IMAGE_LINK_NAME") + '.' + self.image_fstype)
+ self.kernel = os.path.join(d.getVar("DEPLOY_DIR_IMAGE"), d.getVar("KERNEL_IMAGETYPE", False) + '-' + d.getVar('MACHINE', False) + '.bin')
+ self.qemulog = os.path.join(self.testdir, "qemu_boot_log.%s" % self.datetime)
+ dump_target_cmds = d.getVar("testimage_dump_target")
+ dump_host_cmds = d.getVar("testimage_dump_host")
+ dump_dir = d.getVar("TESTIMAGE_DUMP_DIR")
+ qemu_use_kvm = d.getVar("QEMU_USE_KVM")
+ if qemu_use_kvm and \
+ (oe.types.boolean(qemu_use_kvm) and "x86" in d.getVar("MACHINE") or \
+ d.getVar("MACHINE") in qemu_use_kvm.split()):
+ use_kvm = True
+ else:
+ use_kvm = False
+
+ # Log QemuRunner log output to a file
+ import oe.path
+ bb.utils.mkdirhier(self.testdir)
+ self.qemurunnerlog = os.path.join(self.testdir, 'qemurunner_log.%s' % self.datetime)
+ loggerhandler = logging.FileHandler(self.qemurunnerlog)
+ loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+ self.logger.addHandler(loggerhandler)
+ oe.path.symlink(os.path.basename(self.qemurunnerlog), os.path.join(self.testdir, 'qemurunner_log'), force=True)
+
+ if d.getVar("DISTRO") == "poky-tiny":
+ self.runner = QemuTinyRunner(machine=d.getVar("MACHINE"),
+ rootfs=self.rootfs,
+ tmpdir = d.getVar("TMPDIR"),
+ deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE"),
+ display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY"),
+ logfile = self.qemulog,
+ kernel = self.kernel,
+ boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT")),
+ logger = logger)
+ else:
+ self.runner = QemuRunner(machine=d.getVar("MACHINE"),
+ rootfs=self.rootfs,
+ tmpdir = d.getVar("TMPDIR"),
+ deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE"),
+ display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY"),
+ logfile = self.qemulog,
+ boottime = int(d.getVar("TEST_QEMUBOOT_TIMEOUT")),
+ use_kvm = use_kvm,
+ dump_dir = dump_dir,
+ dump_host_cmds = dump_host_cmds,
+ logger = logger)
+
+ self.target_dumper = TargetDumper(dump_target_cmds, dump_dir, self.runner)
+
+ def deploy(self):
+ bb.utils.mkdirhier(self.testdir)
+
+ qemuloglink = os.path.join(self.testdir, "qemu_boot_log")
+ if os.path.islink(qemuloglink):
+ os.unlink(qemuloglink)
+ os.symlink(self.qemulog, qemuloglink)
+
+ self.logger.info("rootfs file: %s" % self.rootfs)
+ self.logger.info("Qemu log file: %s" % self.qemulog)
+ super(QemuTarget, self).deploy()
+
+ def start(self, params=None, ssh=True, extra_bootparams='', runqemuparams='', launch_cmd='', discard_writes=True):
+ if launch_cmd:
+ start = self.runner.launch(get_ip=ssh, launch_cmd=launch_cmd)
+ else:
+ start = self.runner.start(params, get_ip=ssh, extra_bootparams=extra_bootparams, runqemuparams=runqemuparams, discard_writes=discard_writes)
+
+ if start:
+ if ssh:
+ self.ip = self.runner.ip
+ self.server_ip = self.runner.server_ip
+ self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)
+ else:
+ self.stop()
+ if os.path.exists(self.qemulog):
+ with open(self.qemulog, 'r') as f:
+ bb.error("Qemu log output from %s:\n%s" % (self.qemulog, f.read()))
+ raise bb.build.FuncFailed("%s - FAILED to start qemu - check the task log and the boot log" % self.pn)
+
+ def check(self):
+ return self.runner.is_alive()
+
+ def stop(self):
+ self.runner.stop()
+ self.connection = None
+ self.ip = None
+ self.server_ip = None
+
+ def restart(self, params=None):
+ if self.runner.restart(params):
+ self.ip = self.runner.ip
+ self.server_ip = self.runner.server_ip
+ self.connection = SSHControl(ip=self.ip, logfile=self.sshlog)
+ else:
+ raise bb.build.FuncFailed("%s - FAILED to re-start qemu - check the task log and the boot log" % self.pn)
+
+ def run_serial(self, command, timeout=5):
+ return self.runner.run_serial(command, timeout=timeout)
+
+
+class SimpleRemoteTarget(BaseTarget):
+
+ def __init__(self, d, logger):
+ super(SimpleRemoteTarget, self).__init__(d, logger)
+ addr = d.getVar("TEST_TARGET_IP") or bb.fatal('Please set TEST_TARGET_IP with the IP address of the machine you want to run the tests on.')
+ self.ip = addr.split(":")[0]
+ try:
+ self.port = addr.split(":")[1]
+ except IndexError:
+ self.port = None
+ self.logger.info("Target IP: %s" % self.ip)
+ self.server_ip = d.getVar("TEST_SERVER_IP")
+ if not self.server_ip:
+ try:
+ self.server_ip = subprocess.check_output(['ip', 'route', 'get', self.ip ]).decode('utf-8').split("\n")[0].split()[-1]
+ except Exception as e:
+ bb.fatal("Failed to determine the host IP address (alternatively you can set TEST_SERVER_IP with the IP address of this machine): %s" % e)
+ self.logger.info("Server IP: %s" % self.server_ip)
+
+ def deploy(self):
+ super(SimpleRemoteTarget, self).deploy()
+
+ def start(self, params=None, ssh=True, extra_bootparams=None):
+ if ssh:
+ self.connection = SSHControl(self.ip, logfile=self.sshlog, port=self.port)
+
+ def stop(self):
+ self.connection = None
+ self.ip = None
+ self.server_ip = None
diff --git a/poky/meta/lib/oeqa/utils/__init__.py b/poky/meta/lib/oeqa/utils/__init__.py
new file mode 100644
index 000000000..d38a32301
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/__init__.py
@@ -0,0 +1,103 @@
+# Enable other layers to have modules in the same-named directory
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
+
+# Borrowed from CalledProcessError
+
+class CommandError(Exception):
+ def __init__(self, retcode, cmd, output = None):
+ self.retcode = retcode
+ self.cmd = cmd
+ self.output = output
+ def __str__(self):
+ return "Command '%s' returned non-zero exit status %d with output: %s" % (self.cmd, self.retcode, self.output)
+
+def avoid_paths_in_environ(paths):
+ """
+ Searches os.environ['PATH'] for every path in paths and
+ removes any entry that contains one of them.
+
+ Returns the new PATH value without the avoided paths.
+ """
+ import os
+
+ new_path = ''
+ for p in os.environ['PATH'].split(':'):
+ avoid = False
+ for pa in paths:
+ if pa in p:
+ avoid = True
+ break
+ if avoid:
+ continue
+
+ new_path = new_path + p + ':'
+
+ new_path = new_path[:-1]
+ return new_path
+
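+# Illustrative usage (the path below is hypothetical):
+#   os.environ['PATH'] = avoid_paths_in_environ(['/opt/old-tools'])
+# Any PATH entry that contains '/opt/old-tools' as a substring is dropped.
+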
+def make_logger_bitbake_compatible(logger):
+ """
+ The BitBake logger redefines debug() in order to set a level
+ within debug; this breaks compatibility with vanilla logging,
+ so we need to redefine the debug() method here as well, and
+ also add an info() method that logs at the INFO + 1 level.
+ """
+ import logging
+
+ def _bitbake_log_debug(*args, **kwargs):
+ lvl = logging.DEBUG
+
+ if isinstance(args[0], int):
+ lvl = args[0]
+ msg = args[1]
+ args = args[2:]
+ else:
+ msg = args[0]
+ args = args[1:]
+
+ logger.log(lvl, msg, *args, **kwargs)
+
+ def _bitbake_log_info(msg, *args, **kwargs):
+ logger.log(logging.INFO + 1, msg, *args, **kwargs)
+
+ logger.debug = _bitbake_log_debug
+ logger.info = _bitbake_log_info
+
+ return logger
+
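+# Minimal usage sketch (the logger name is an assumption):
+#   logger = make_logger_bitbake_compatible(logging.getLogger("BitBake"))
+#   logger.info("logged at INFO + 1 so BitBake-style UIs display it")
+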
+def load_test_components(logger, executor):
+ import sys
+ import os
+ import importlib
+
+ from oeqa.core.context import OETestContextExecutor
+
+ components = {}
+
+ for path in sys.path:
+ base_dir = os.path.join(path, 'oeqa')
+ if os.path.exists(base_dir) and os.path.isdir(base_dir):
+ for file in os.listdir(base_dir):
+ comp_name = file
+ comp_context = os.path.join(base_dir, file, 'context.py')
+ if os.path.exists(comp_context):
+ comp_plugin = importlib.import_module('oeqa.%s.%s' % \
+ (comp_name, 'context'))
+ try:
+ if not issubclass(comp_plugin._executor_class,
+ OETestContextExecutor):
+ raise TypeError("Component %s in %s, _executor_class "\
+ "isn't derived from OETestContextExecutor."\
+ % (comp_name, comp_context))
+
+ if comp_plugin._executor_class._script_executor \
+ != executor:
+ continue
+
+ components[comp_name] = comp_plugin._executor_class()
+ except AttributeError:
+ raise AttributeError("Component %s in %s don't have "\
+ "_executor_class defined." % (comp_name, comp_context))
+
+ return components
diff --git a/poky/meta/lib/oeqa/utils/buildproject.py b/poky/meta/lib/oeqa/utils/buildproject.py
new file mode 100644
index 000000000..721f35d99
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/buildproject.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2013-2016 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Provides a class for automating build tests for projects
+
+import os
+import re
+import subprocess
+import shutil
+import tempfile
+
+from abc import ABCMeta, abstractmethod
+
+class BuildProject(metaclass=ABCMeta):
+ def __init__(self, uri, foldername=None, tmpdir=None, dl_dir=None):
+ self.uri = uri
+ self.archive = os.path.basename(uri)
+ if not tmpdir:
+ tmpdir = tempfile.mkdtemp(prefix='buildproject')
+ self.localarchive = os.path.join(tmpdir, self.archive)
+ self.dl_dir = dl_dir
+ if foldername:
+ self.fname = foldername
+ else:
+ self.fname = re.sub(r'\.tar\.bz2$|\.tar\.gz$|\.tar\.xz$', '', self.archive)
+
+ # Download self.archive to self.localarchive
+ def _download_archive(self):
+ if self.dl_dir and os.path.exists(os.path.join(self.dl_dir, self.archive)):
+ shutil.copyfile(os.path.join(self.dl_dir, self.archive), self.localarchive)
+ return
+
+ cmd = "wget -O %s %s" % (self.localarchive, self.uri)
+ subprocess.check_output(cmd, shell=True)
+
+ # This method should provide a way to run a command in the desired environment.
+ @abstractmethod
+ def _run(self, cmd):
+ pass
+
+ # The timeout parameter of target.run is set to 0 to make the ssh command
+ # run with no timeout.
+ def run_configure(self, configure_args='', extra_cmds=''):
+ return self._run('cd %s; gnu-configize; %s ./configure %s' % (self.targetdir, extra_cmds, configure_args))
+
+ def run_make(self, make_args=''):
+ return self._run('cd %s; make %s' % (self.targetdir, make_args))
+
+ def run_install(self, install_args=''):
+ return self._run('cd %s; make install %s' % (self.targetdir, install_args))
+
+ def clean(self):
+ self._run('rm -rf %s' % self.targetdir)
+ subprocess.check_call('rm -f %s' % self.localarchive, shell=True)
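+
+# Minimal sketch of a concrete subclass that builds on the host
+# (illustrative only; the real subclasses elsewhere in oeqa also set
+# self.targetdir to the unpacked source directory before configuring):
+#
+#   class HostBuildProject(BuildProject):
+#       def _run(self, cmd):
+#           return subprocess.call(cmd, shell=True)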
diff --git a/poky/meta/lib/oeqa/utils/commands.py b/poky/meta/lib/oeqa/utils/commands.py
new file mode 100644
index 000000000..0d9cf23fe
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/commands.py
@@ -0,0 +1,357 @@
+# Copyright (c) 2013-2014 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# DESCRIPTION
+# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest.
+# It provides a class and methods for running commands on the host in a convenient way for tests.
+
+import os
+import sys
+import signal
+import subprocess
+import threading
+import time
+import logging
+import errno
+from oeqa.utils import CommandError
+from oeqa.utils import ftools
+import re
+import contextlib
+# Export test doesn't require bb
+try:
+ import bb
+except ImportError:
+ pass
+
+class Command(object):
+ def __init__(self, command, bg=False, timeout=None, data=None, output_log=None, **options):
+
+ self.defaultopts = {
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.STDOUT,
+ "stdin": None,
+ "shell": False,
+ "bufsize": -1,
+ }
+
+ self.cmd = command
+ self.bg = bg
+ self.timeout = timeout
+ self.data = data
+
+ self.options = dict(self.defaultopts)
+ if isinstance(self.cmd, str):
+ self.options["shell"] = True
+ if self.data:
+ self.options['stdin'] = subprocess.PIPE
+ self.options.update(options)
+
+ self.status = None
+ # We collect chunks of output before joining them at the end.
+ self._output_chunks = []
+ self._error_chunks = []
+ self.output = None
+ self.error = None
+ self.threads = []
+
+ self.output_log = output_log
+ self.log = logging.getLogger("utils.commands")
+
+ def run(self):
+ self.process = subprocess.Popen(self.cmd, **self.options)
+
+ def readThread(output, stream, logfunc):
+ if logfunc:
+ for line in stream:
+ output.append(line)
+ logfunc(line.decode("utf-8", errors='replace').rstrip())
+ else:
+ output.append(stream.read())
+
+ def readStderrThread():
+ readThread(self._error_chunks, self.process.stderr, self.output_log.error if self.output_log else None)
+
+ def readStdoutThread():
+ readThread(self._output_chunks, self.process.stdout, self.output_log.info if self.output_log else None)
+
+ def writeThread():
+ try:
+ self.process.stdin.write(self.data)
+ self.process.stdin.close()
+ except OSError as ex:
+ # It's not an error when the command does not consume all
+ # of our data. subprocess.communicate() also ignores that.
+ if ex.errno != errno.EPIPE:
+ raise
+
+ # We write in a separate thread because then we can read
+ # without worrying about deadlocks. The additional thread is
+ # expected to terminate by itself and we mark it as a daemon,
+ # so even it should happen to not terminate for whatever
+ # reason, the main process will still exit, which will then
+ # kill the write thread.
+ if self.data:
+ threading.Thread(target=writeThread, daemon=True).start()
+ if self.process.stderr:
+ thread = threading.Thread(target=readStderrThread)
+ thread.start()
+ self.threads.append(thread)
+ if self.output_log:
+ self.output_log.info('Running: %s' % self.cmd)
+ thread = threading.Thread(target=readStdoutThread)
+ thread.start()
+ self.threads.append(thread)
+
+ self.log.debug("Running command '%s'" % self.cmd)
+
+ if not self.bg:
+ if self.timeout is None:
+ for thread in self.threads:
+ thread.join()
+ else:
+ deadline = time.time() + self.timeout
+ for thread in self.threads:
+ timeout = deadline - time.time()
+ if timeout < 0:
+ timeout = 0
+ thread.join(timeout)
+ self.stop()
+
+ def stop(self):
+ for thread in self.threads:
+ if thread.is_alive():
+ self.process.terminate()
+ # let's give it more time to terminate gracefully before killing it
+ thread.join(5)
+ if thread.is_alive():
+ self.process.kill()
+ thread.join()
+
+ def finalize_output(data):
+ if not data:
+ data = ""
+ else:
+ data = b"".join(data)
+ data = data.decode("utf-8", errors='replace').rstrip()
+ return data
+
+ self.output = finalize_output(self._output_chunks)
+ self._output_chunks = None
+ # self.error used to be a byte string earlier, probably unintentionally.
+ # Now it is a normal string, just like self.output.
+ self.error = finalize_output(self._error_chunks)
+ self._error_chunks = None
+ # At this point we know that the process has closed stdout/stderr, so
+ # it is safe and necessary to wait for the actual process completion.
+ self.status = self.process.wait()
+
+ self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
+ # Logging the complete output would be unreasonable: the output of
+ # e.g. "bitbake -e" is huge and would make the log file useless.
+ if self.status:
+ lout = "\n".join(self.output.splitlines()[-20:])
+ self.log.debug("Last 20 lines:\n%s" % lout)
+
+
+class Result(object):
+ pass
+
+
+def runCmd(command, ignore_status=False, timeout=None, assert_error=True,
+ native_sysroot=None, limit_exc_output=0, output_log=None, **options):
+ result = Result()
+
+ if native_sysroot:
+ extra_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
+ (native_sysroot, native_sysroot, native_sysroot)
+ nenv = dict(options.get('env', os.environ))
+ nenv['PATH'] = extra_paths + ':' + nenv.get('PATH', '')
+ options['env'] = nenv
+
+ cmd = Command(command, timeout=timeout, output_log=output_log, **options)
+ cmd.run()
+
+ result.command = command
+ result.status = cmd.status
+ result.output = cmd.output
+ result.error = cmd.error
+ result.pid = cmd.process.pid
+
+ if result.status and not ignore_status:
+ exc_output = result.output
+ if limit_exc_output > 0:
+ split = result.output.splitlines()
+ if len(split) > limit_exc_output:
+ exc_output = "\n... (last %d lines of output)\n" % limit_exc_output + \
+ '\n'.join(split[-limit_exc_output:])
+ if assert_error:
+ raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, exc_output))
+ else:
+ raise CommandError(result.status, command, exc_output)
+
+ return result
+
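+# Illustrative usage (the command is arbitrary):
+#   result = runCmd('bitbake-layers show-layers', ignore_status=True)
+#   if result.status:
+#       print(result.output)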
+
+def bitbake(command, ignore_status=False, timeout=None, postconfig=None, output_log=None, **options):
+
+ if postconfig:
+ postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
+ ftools.write_file(postconfig_file, postconfig)
+ extra_args = "-R %s" % postconfig_file
+ else:
+ extra_args = ""
+
+ if isinstance(command, str):
+ cmd = "bitbake " + extra_args + " " + command
+ else:
+ cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
+
+ try:
+ return runCmd(cmd, ignore_status, timeout, output_log=output_log, **options)
+ finally:
+ if postconfig:
+ os.remove(postconfig_file)
+
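+# Illustrative usage: run a task with a temporary post-config fragment
+# (the recipe name is an example):
+#   bitbake('core-image-minimal -c rootfs',
+#           postconfig='IMAGE_FSTYPES += "tar.gz"')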
+
+def get_bb_env(target=None, postconfig=None):
+ if target:
+ return bitbake("-e %s" % target, postconfig=postconfig).output
+ else:
+ return bitbake("-e", postconfig=postconfig).output
+
+def get_bb_vars(variables=None, target=None, postconfig=None):
+ """Get values of multiple bitbake variables"""
+ bbenv = get_bb_env(target, postconfig=postconfig)
+
+ if variables is not None:
+ variables = list(variables)
+ var_re = re.compile(r'^(export )?(?P<var>\w+(_.*)?)="(?P<value>.*)"$')
+ unset_re = re.compile(r'^unset (?P<var>\w+)$')
+ lastline = None
+ values = {}
+ for line in bbenv.splitlines():
+ match = var_re.match(line)
+ val = None
+ if match:
+ val = match.group('value')
+ else:
+ match = unset_re.match(line)
+ if match:
+ # Handle [unexport] variables
+ if lastline.startswith('# "'):
+ val = lastline.split('"')[1]
+ if val:
+ var = match.group('var')
+ if variables is None:
+ values[var] = val
+ else:
+ if var in variables:
+ values[var] = val
+ variables.remove(var)
+ # Stop after all required variables have been found
+ if not variables:
+ break
+ lastline = line
+ if variables:
+ # Fill in missing values
+ for var in variables:
+ values[var] = None
+ return values
+
+def get_bb_var(var, target=None, postconfig=None):
+ return get_bb_vars([var], target, postconfig)[var]
+
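+# Illustrative usage (variable names come from the standard metadata):
+#   machine = get_bb_var('MACHINE')
+#   vals = get_bb_vars(['DEPLOY_DIR_IMAGE', 'IMAGE_LINK_NAME'],
+#                      target='core-image-minimal')
+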
+def get_test_layer():
+ layers = get_bb_var("BBLAYERS").split()
+ testlayer = None
+ for l in layers:
+ if '~' in l:
+ l = os.path.expanduser(l)
+ if "/meta-selftest" in l and os.path.isdir(l):
+ testlayer = l
+ break
+ return testlayer
+
+def create_temp_layer(templayerdir, templayername, priority=999, recipepathspec='recipes-*/*'):
+ os.makedirs(os.path.join(templayerdir, 'conf'))
+ with open(os.path.join(templayerdir, 'conf', 'layer.conf'), 'w') as f:
+ f.write('BBPATH .= ":${LAYERDIR}"\n')
+ f.write('BBFILES += "${LAYERDIR}/%s/*.bb \\\n' % recipepathspec)
+ f.write(' ${LAYERDIR}/%s/*.bbappend"\n' % recipepathspec)
+ f.write('BBFILE_COLLECTIONS += "%s"\n' % templayername)
+ f.write('BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' % templayername)
+ f.write('BBFILE_PRIORITY_%s = "%d"\n' % (templayername, priority))
+ f.write('BBFILE_PATTERN_IGNORE_EMPTY_%s = "1"\n' % templayername)
+ f.write('LAYERSERIES_COMPAT_%s = "${LAYERSERIES_COMPAT_core}"\n' % templayername)
+
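+# Illustrative usage (paths and layer name are hypothetical):
+#   create_temp_layer('/tmp/meta-temp', 'templayer')
+#   runCmd('bitbake-layers add-layer /tmp/meta-temp')
+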
+@contextlib.contextmanager
+def runqemu(pn, ssh=True, runqemuparams='', image_fstype=None, launch_cmd=None, qemuparams=None, overrides={}, discard_writes=True):
+ """
+ If launch_cmd is given, run that command directly; there is then no need to set up a rootfs or environment variables.
+ """
+
+ import bb.tinfoil
+ import bb.build
+
+ # Need a non-'BitBake' logger to capture the runner output
+ targetlogger = logging.getLogger('TargetRunner')
+ targetlogger.setLevel(logging.DEBUG)
+ handler = logging.StreamHandler(sys.stdout)
+ targetlogger.addHandler(handler)
+
+ tinfoil = bb.tinfoil.Tinfoil()
+ tinfoil.prepare(config_only=False, quiet=True)
+ try:
+ tinfoil.logger.setLevel(logging.WARNING)
+ import oeqa.targetcontrol
+ tinfoil.config_data.setVar("TEST_LOG_DIR", "${WORKDIR}/testimage")
+ tinfoil.config_data.setVar("TEST_QEMUBOOT_TIMEOUT", "1000")
+ # Tell QemuTarget() whether it needs to find the rootfs/kernel or not
+ if launch_cmd:
+ tinfoil.config_data.setVar("FIND_ROOTFS", '0')
+ else:
+ tinfoil.config_data.setVar("FIND_ROOTFS", '1')
+
+ recipedata = tinfoil.parse_recipe(pn)
+ for key, value in overrides.items():
+ recipedata.setVar(key, value)
+
+ logdir = recipedata.getVar("TEST_LOG_DIR")
+
+ qemu = oeqa.targetcontrol.QemuTarget(recipedata, targetlogger, image_fstype)
+ finally:
+ # We need to shut down tinfoil early here in case we actually want
+ # to run tinfoil-using utilities with the running QEMU instance.
+ # Luckily QemuTarget doesn't need it after the constructor.
+ tinfoil.shutdown()
+
+ try:
+ qemu.deploy()
+ try:
+ qemu.start(params=qemuparams, ssh=ssh, runqemuparams=runqemuparams, launch_cmd=launch_cmd, discard_writes=discard_writes)
+ except bb.build.FuncFailed:
+ raise Exception('Failed to start QEMU - see the logs in %s' % logdir)
+
+ yield qemu
+
+ finally:
+ try:
+ qemu.stop()
+ except:
+ pass
+ targetlogger.removeHandler(handler)
+
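+# Illustrative usage as a context manager (the image name is an example):
+#   with runqemu('core-image-minimal') as qemu:
+#       status, output = qemu.run_serial('uname -a')
+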
+def updateEnv(env_file):
+ """
+ Source a file and update environment.
+ """
+
+ cmd = ". %s; env -0" % env_file
+ result = runCmd(cmd)
+
+ for line in result.output.split("\0"):
+ (key, _, value) = line.partition("=")
+ os.environ[key] = value
diff --git a/poky/meta/lib/oeqa/utils/decorators.py b/poky/meta/lib/oeqa/utils/decorators.py
new file mode 100644
index 000000000..d87689692
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/decorators.py
@@ -0,0 +1,295 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Some custom decorators that can be used by unittests
+# Most useful is skipUnlessPassed which can be used for
+# creating dependecies between two test methods.
+
+import os
+import logging
+import sys
+import unittest
+import threading
+import signal
+from functools import wraps
+
+#get the "result" object from one of the upper frames provided that one of these upper frames is a unittest.case frame
+class getResults(object):
+ def __init__(self):
+ #dynamically determine the unittest.case frame and use it to get the name of the test method
+ ident = threading.current_thread().ident
+ upperf = sys._current_frames()[ident]
+ while (upperf.f_globals['__name__'] != 'unittest.case'):
+ upperf = upperf.f_back
+
+ def handleList(items):
+ ret = []
+ # items is a list of tuples, (test, failure) or (_ErrorHolder(), Exception())
+ for i in items:
+ s = i[0].id()
+ #Handle the _ErrorHolder objects from skipModule failures
+ if "setUpModule (" in s:
+ ret.append(s.replace("setUpModule (", "").replace(")",""))
+ else:
+ ret.append(s)
+ # Append also the test without the full path
+ testname = s.split('.')[-1]
+ if testname:
+ ret.append(testname)
+ return ret
+ self.faillist = handleList(upperf.f_locals['result'].failures)
+ self.errorlist = handleList(upperf.f_locals['result'].errors)
+ self.skiplist = handleList(upperf.f_locals['result'].skipped)
+
+ def getFailList(self):
+ return self.faillist
+
+ def getErrorList(self):
+ return self.errorlist
+
+ def getSkipList(self):
+ return self.skiplist
+
+class skipIfFailure(object):
+
+ def __init__(self,testcase):
+ self.testcase = testcase
+
+ def __call__(self,f):
+ @wraps(f)
+ def wrapped_f(*args, **kwargs):
+ res = getResults()
+ if self.testcase in res.getFailList() or self.testcase in res.getErrorList():
+ raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
+ return f(*args, **kwargs)
+ wrapped_f.__name__ = f.__name__
+ return wrapped_f
+
+class skipIfSkipped(object):
+
+ def __init__(self,testcase):
+ self.testcase = testcase
+
+ def __call__(self,f):
+ @wraps(f)
+ def wrapped_f(*args, **kwargs):
+ res = getResults()
+ if self.testcase in res.getSkipList():
+ raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
+ return f(*args, **kwargs)
+ wrapped_f.__name__ = f.__name__
+ return wrapped_f
+
+class skipUnlessPassed(object):
+
+ def __init__(self,testcase):
+ self.testcase = testcase
+
+ def __call__(self,f):
+ @wraps(f)
+ def wrapped_f(*args, **kwargs):
+ res = getResults()
+ if self.testcase in res.getSkipList() or \
+ self.testcase in res.getFailList() or \
+ self.testcase in res.getErrorList():
+ raise unittest.SkipTest("Testcase dependency not met: %s" % self.testcase)
+ return f(*args, **kwargs)
+ wrapped_f.__name__ = f.__name__
+ wrapped_f._depends_on = self.testcase
+ return wrapped_f
+
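+# Illustrative usage (test names are hypothetical):
+#   @skipUnlessPassed('test_ssh')
+#   def test_scp(self):
+#       ...
+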
+class testcase(object):
+ def __init__(self, test_case):
+ self.test_case = test_case
+
+ def __call__(self, func):
+ @wraps(func)
+ def wrapped_f(*args, **kwargs):
+ return func(*args, **kwargs)
+ wrapped_f.test_case = self.test_case
+ wrapped_f.__name__ = func.__name__
+ return wrapped_f
+
+class NoParsingFilter(logging.Filter):
+ def filter(self, record):
+ return record.levelno == 100
+
+import inspect
+
+def LogResults(original_class):
+ orig_method = original_class.run
+
+ from time import strftime, gmtime
+ caller = os.path.basename(sys.argv[0])
+ timestamp = strftime('%Y%m%d%H%M%S',gmtime())
+ logfile = os.path.join(os.getcwd(),'results-'+caller+'.'+timestamp+'.log')
+ linkfile = os.path.join(os.getcwd(),'results-'+caller+'.log')
+
+ def get_class_that_defined_method(meth):
+ if inspect.ismethod(meth):
+ for cls in inspect.getmro(meth.__self__.__class__):
+ if cls.__dict__.get(meth.__name__) is meth:
+ return cls
+ meth = meth.__func__ # fallback to __qualname__ parsing
+ if inspect.isfunction(meth):
+ cls = getattr(inspect.getmodule(meth),
+ meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
+ if isinstance(cls, type):
+ return cls
+ return None
+
+ # Rewrite the run method of unittest.TestCase to add testcase logging
+ def run(self, result, *args, **kws):
+ orig_method(self, result, *args, **kws)
+ passed = True
+ testMethod = getattr(self, self._testMethodName)
+ # If the test case is decorated then use its number, else use its name
+ try:
+ test_case = testMethod.test_case
+ except AttributeError:
+ test_case = self._testMethodName
+
+ class_name = str(get_class_that_defined_method(testMethod)).split("'")[1]
+
+ #create custom logging level for filtering.
+ custom_log_level = 100
+ logging.addLevelName(custom_log_level, 'RESULTS')
+
+ def results(self, message, *args, **kws):
+ if self.isEnabledFor(custom_log_level):
+ self.log(custom_log_level, message, *args, **kws)
+ logging.Logger.results = results
+
+ logging.basicConfig(filename=logfile,
+ filemode='w',
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ datefmt='%H:%M:%S',
+ level=custom_log_level)
+ for handler in logging.root.handlers:
+ handler.addFilter(NoParsingFilter())
+ local_log = logging.getLogger(caller)
+
+ #check status of tests and record it
+
+ tcid = self.id()
+ for (name, msg) in result.errors:
+ if tcid == name.id():
+ local_log.results("Testcase "+str(test_case)+": ERROR")
+ local_log.results("Testcase "+str(test_case)+":\n"+msg)
+ passed = False
+ for (name, msg) in result.failures:
+ if tcid == name.id():
+ local_log.results("Testcase "+str(test_case)+": FAILED")
+ local_log.results("Testcase "+str(test_case)+":\n"+msg)
+ passed = False
+ for (name, msg) in result.skipped:
+ if tcid == name.id():
+ local_log.results("Testcase "+str(test_case)+": SKIPPED")
+ passed = False
+ if passed:
+ local_log.results("Testcase "+str(test_case)+": PASSED")
+
+ # XXX: To avoid a race condition when testing whether the linkfile
+ # exists, use bb.utils.lockfile; the best solution would be to create
+ # a unique name for the link file.
+ try:
+ import bb
+ has_bb = True
+ lockfilename = linkfile + '.lock'
+ except ImportError:
+ has_bb = False
+
+ if has_bb:
+ lf = bb.utils.lockfile(lockfilename, block=True)
+ # Create symlink to the current log
+ if os.path.lexists(linkfile):
+ os.remove(linkfile)
+ os.symlink(logfile, linkfile)
+ if has_bb:
+ bb.utils.unlockfile(lf)
+
+ original_class.run = run
+
+ return original_class
+
+class TimeOut(BaseException):
+ pass
+
+def timeout(seconds):
+ def decorator(fn):
+ if hasattr(signal, 'alarm'):
+ @wraps(fn)
+ def wrapped_f(*args, **kw):
+ current_frame = sys._getframe()
+ def raiseTimeOut(signal, frame):
+ if frame is not current_frame:
+ raise TimeOut('%s seconds' % seconds)
+ prev_handler = signal.signal(signal.SIGALRM, raiseTimeOut)
+ try:
+ signal.alarm(seconds)
+ return fn(*args, **kw)
+ finally:
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, prev_handler)
+ return wrapped_f
+ else:
+ return fn
+ return decorator
+
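+# Illustrative usage: raise TimeOut if the test runs for more than 60
+# seconds (only effective on platforms that provide signal.alarm):
+#   @timeout(60)
+#   def test_boot(self):
+#       ...
+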
+__tag_prefix = "tag__"
+def tag(*args, **kwargs):
+ """Decorator that adds attributes to classes or functions
+ for use with the Attribute (-a) plugin.
+ """
+ def wrap_ob(ob):
+ for name in args:
+ setattr(ob, __tag_prefix + name, True)
+ for name, value in kwargs.items():
+ setattr(ob, __tag_prefix + name, value)
+ return ob
+ return wrap_ob
+
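+# Illustrative usage (tag names are hypothetical):
+#   @tag('machine', feature='systemd')
+#   def test_feature(self):
+#       ...
+# gettag(test, 'feature') would then return 'systemd'.
+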
+def gettag(obj, key, default=None):
+ key = __tag_prefix + key
+ if not isinstance(obj, unittest.TestCase):
+ return getattr(obj, key, default)
+ tc_method = getattr(obj, obj._testMethodName)
+ ret = getattr(tc_method, key, getattr(obj, key, default))
+ return ret
+
+def getAllTags(obj):
+ def __gettags(o):
+ r = {k[len(__tag_prefix):]:getattr(o,k) for k in dir(o) if k.startswith(__tag_prefix)}
+ return r
+ if not isinstance(obj, unittest.TestCase):
+ return __gettags(obj)
+ tc_method = getattr(obj, obj._testMethodName)
+ ret = __gettags(obj)
+ ret.update(__gettags(tc_method))
+ return ret
+
+def timeout_handler(seconds):
+ def decorator(fn):
+ if hasattr(signal, 'alarm'):
+ @wraps(fn)
+ def wrapped_f(self, *args, **kw):
+ current_frame = sys._getframe()
+ def raiseTimeOut(signal, frame):
+ if frame is not current_frame:
+ try:
+ self.target.restart()
+ raise TimeOut('%s seconds' % seconds)
+ except:
+ raise TimeOut('%s seconds' % seconds)
+ prev_handler = signal.signal(signal.SIGALRM, raiseTimeOut)
+ try:
+ signal.alarm(seconds)
+ return fn(self, *args, **kw)
+ finally:
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, prev_handler)
+ return wrapped_f
+ else:
+ return fn
+ return decorator
diff --git a/poky/meta/lib/oeqa/utils/dump.py b/poky/meta/lib/oeqa/utils/dump.py
new file mode 100644
index 000000000..5a7edc1a8
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/dump.py
@@ -0,0 +1,91 @@
+import os
+import sys
+import errno
+import datetime
+import itertools
+from .commands import runCmd
+
+class BaseDumper(object):
+ """ Base class to dump commands from host/target """
+
+ def __init__(self, cmds, parent_dir):
+ self.cmds = []
+ # Some testing doesn't inherit testimage, so some
+ # defaults need to be set here.
+ self.parent_dir = parent_dir or "/tmp/oe-saved-tests"
+ dft_cmds = """ top -bn1
+ iostat -x -z -N -d -p ALL 20 2
+ ps -ef
+ free
+ df
+ memstat
+ dmesg
+ ip -s link
+ netstat -an"""
+ if not cmds:
+ cmds = dft_cmds
+ for cmd in cmds.split('\n'):
+ cmd = cmd.lstrip()
+ if not cmd or cmd[0] == '#':
+ continue
+ self.cmds.append(cmd)
+
+ def create_dir(self, dir_suffix):
+ dump_subdir = ("%s_%s" % (
+ datetime.datetime.now().strftime('%Y%m%d%H%M'),
+ dir_suffix))
+ dump_dir = os.path.join(self.parent_dir, dump_subdir)
+ try:
+ os.makedirs(dump_dir)
+ except OSError as err:
+ if err.errno != errno.EEXIST:
+ raise err
+ self.dump_dir = dump_dir
+
+ def _write_dump(self, command, output):
+ if isinstance(self, HostDumper):
+ prefix = "host"
+ elif isinstance(self, TargetDumper):
+ prefix = "target"
+ else:
+ prefix = "unknown"
+ for i in itertools.count():
+ filename = "%s_%02d_%s" % (prefix, i, command)
+ fullname = os.path.join(self.dump_dir, filename)
+ if not os.path.exists(fullname):
+ break
+ with open(fullname, 'w') as dump_file:
+ dump_file.write(output)
+
+
+class HostDumper(BaseDumper):
+ """ Class to get dumps from the host running the tests """
+
+ def __init__(self, cmds, parent_dir):
+ super(HostDumper, self).__init__(cmds, parent_dir)
+
+ def dump_host(self, dump_dir=""):
+ if dump_dir:
+ self.dump_dir = dump_dir
+ for cmd in self.cmds:
+ result = runCmd(cmd, ignore_status=True)
+ self._write_dump(cmd.split()[0], result.output)
+
+class TargetDumper(BaseDumper):
+ """ Class to get dumps from target, it only works with QemuRunner """
+
+ def __init__(self, cmds, parent_dir, runner):
+ super(TargetDumper, self).__init__(cmds, parent_dir)
+ self.runner = runner
+
+ def dump_target(self, dump_dir=""):
+ if dump_dir:
+ self.dump_dir = dump_dir
+ for cmd in self.cmds:
+ # We can continue with the testing if serial commands fail
+ try:
+ (status, output) = self.runner.run_serial(cmd)
+ self._write_dump(cmd.split()[0], output)
+ except:
+ print("Tried to dump info from target but "
+ "serial console failed")
diff --git a/poky/meta/lib/oeqa/utils/ftools.py b/poky/meta/lib/oeqa/utils/ftools.py
new file mode 100644
index 000000000..a7233d4ca
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/ftools.py
@@ -0,0 +1,46 @@
+import os
+import re
+import errno
+
+def write_file(path, data):
+ # In case data is None, return immediately
+ if data is None:
+ return
+ wdata = data.rstrip() + "\n"
+ with open(path, "w") as f:
+ f.write(wdata)
+
+def append_file(path, data):
+ # In case data is None, return immediately
+ if data is None:
+ return
+ wdata = data.rstrip() + "\n"
+ with open(path, "a") as f:
+ f.write(wdata)
+
+def read_file(path):
+ data = None
+ with open(path) as f:
+ data = f.read()
+ return data
+
+def remove_from_file(path, data):
+ # In case data is None, return immediately
+ if data is None:
+ return
+ try:
+ rdata = read_file(path)
+ except IOError as e:
+ # if the file does not exist, just return; otherwise raise an exception
+ if e.errno == errno.ENOENT:
+ return
+ else:
+ raise
+
+ contents = rdata.strip().splitlines()
+ for r in data.strip().splitlines():
+ try:
+ contents.remove(r)
+ except ValueError:
+ pass
+ write_file(path, "\n".join(contents))
diff --git a/poky/meta/lib/oeqa/utils/git.py b/poky/meta/lib/oeqa/utils/git.py
new file mode 100644
index 000000000..757e3f0cb
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/git.py
@@ -0,0 +1,80 @@
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+#
+"""Git repository interactions"""
+import os
+
+from oeqa.utils.commands import runCmd
+
+
+class GitError(Exception):
+ """Git error handling"""
+ pass
+
+class GitRepo(object):
+ """Class representing a Git repository clone"""
+ def __init__(self, path, is_topdir=False):
+ git_dir = self._run_git_cmd_at(['rev-parse', '--git-dir'], path)
+ git_dir = git_dir if os.path.isabs(git_dir) else os.path.join(path, git_dir)
+ self.git_dir = os.path.realpath(git_dir)
+
+ if self._run_git_cmd_at(['rev-parse', '--is-bare-repository'], path) == 'true':
+ self.bare = True
+ self.top_dir = self.git_dir
+ else:
+ self.bare = False
+ self.top_dir = self._run_git_cmd_at(['rev-parse', '--show-toplevel'],
+ path)
+ realpath = os.path.realpath(path)
+ if is_topdir and realpath != self.top_dir:
+ raise GitError("{} is not a Git top directory".format(realpath))
+
+ @staticmethod
+ def _run_git_cmd_at(git_args, cwd, **kwargs):
+ """Run git command at a specified directory"""
+ git_cmd = 'git ' if isinstance(git_args, str) else ['git']
+ git_cmd += git_args
+ ret = runCmd(git_cmd, ignore_status=True, cwd=cwd, **kwargs)
+ if ret.status:
+ cmd_str = git_cmd if isinstance(git_cmd, str) \
+ else ' '.join(git_cmd)
+ raise GitError("'{}' failed with exit code {}: {}".format(
+ cmd_str, ret.status, ret.output))
+ return ret.output.strip()
+
+ @staticmethod
+ def init(path, bare=False):
+ """Initialize a new Git repository"""
+ cmd = ['init']
+ if bare:
+ cmd.append('--bare')
+ GitRepo._run_git_cmd_at(cmd, cwd=path)
+ return GitRepo(path, is_topdir=True)
+
+ def run_cmd(self, git_args, env_update=None):
+ """Run Git command"""
+ env = None
+ if env_update:
+ env = os.environ.copy()
+ env.update(env_update)
+ return self._run_git_cmd_at(git_args, self.top_dir, env=env)
+
+ def rev_parse(self, revision):
+ """Do git rev-parse"""
+ try:
+ return self.run_cmd(['rev-parse', '--verify', revision])
+ except GitError:
+ # Revision does not exist
+ return None
+
+ def get_current_branch(self):
+ """Get current branch"""
+ try:
+ # Strip the first 11 chars, i.e. 'refs/heads/', from the beginning
+ return self.run_cmd(['symbolic-ref', 'HEAD'])[11:]
+ except GitError:
+ return None
+
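+# Illustrative usage (the path is hypothetical):
+#   repo = GitRepo('/path/to/clone', is_topdir=True)
+#   head = repo.rev_parse('HEAD')
+#   branch = repo.get_current_branch()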
+
diff --git a/poky/meta/lib/oeqa/utils/httpserver.py b/poky/meta/lib/oeqa/utils/httpserver.py
new file mode 100644
index 000000000..7d1233145
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/httpserver.py
@@ -0,0 +1,36 @@
+import http.server
+import multiprocessing
+import os
+from socketserver import ThreadingMixIn
+
+class HTTPServer(ThreadingMixIn, http.server.HTTPServer):
+
+ def server_start(self, root_dir):
+ import signal
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ os.chdir(root_dir)
+ self.serve_forever()
+
+class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
+
+ def log_message(self, format_str, *args):
+ pass
+
+class HTTPService(object):
+
+ def __init__(self, root_dir, host=''):
+ self.root_dir = root_dir
+ self.host = host
+ self.port = 0
+
+ def start(self):
+ self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
+ if self.port == 0:
+ self.port = self.server.server_port
+ self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir])
+ self.process.start()
+
+ def stop(self):
+ self.server.server_close()
+ self.process.terminate()
+ self.process.join()
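+
+# Illustrative usage (the served directory is hypothetical):
+#   service = HTTPService('/srv/files')
+#   service.start()   # binds a free port, available as service.port
+#   ...
+#   service.stop()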
diff --git a/poky/meta/lib/oeqa/utils/logparser.py b/poky/meta/lib/oeqa/utils/logparser.py
new file mode 100644
index 000000000..0670627c3
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/logparser.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import re
+from . import ftools
+
+
+# A parser that can be used to identify whether a line is a test result or a section statement.
+class Lparser(object):
+
+ def __init__(self, test_0_pass_regex, test_0_fail_regex, test_0_skip_regex, section_0_begin_regex=None, section_0_end_regex=None, **kwargs):
+ # Initialize the arguments dictionary
+ if kwargs:
+ self.args = kwargs
+ else:
+ self.args = {}
+
+ # Add the default args to the dictionary
+ self.args['test_0_pass_regex'] = test_0_pass_regex
+ self.args['test_0_fail_regex'] = test_0_fail_regex
+ self.args['test_0_skip_regex'] = test_0_skip_regex
+ if section_0_begin_regex:
+ self.args['section_0_begin_regex'] = section_0_begin_regex
+ if section_0_end_regex:
+ self.args['section_0_end_regex'] = section_0_end_regex
+
+ self.test_possible_status = ['pass', 'fail', 'error', 'skip']
+ self.section_possible_status = ['begin', 'end']
+
+ self.initialized = False
+
+
+ # Initialize the parser with the current configuration
+ def init(self):
+
+ # extra arguments can be added by the user to define new test and section categories. They must follow a pre-defined pattern: <type>_<category_name>_<status>_regex
+ self.test_argument_pattern = "^test_(.+?)_(%s)_regex" % '|'.join(map(str, self.test_possible_status))
+ self.section_argument_pattern = "^section_(.+?)_(%s)_regex" % '|'.join(map(str, self.section_possible_status))
+
+ # Initialize the test and section regex dictionaries
+ self.test_regex = {}
+ self.section_regex = {}
+
+ for arg, value in self.args.items():
+ if not value:
+ raise Exception('The value provided for argument %s is %s; it should be a valid value.' % (arg, value))
+ is_test = re.search(self.test_argument_pattern, arg)
+ is_section = re.search(self.section_argument_pattern, arg)
+ if is_test:
+ if not is_test.group(1) in self.test_regex:
+ self.test_regex[is_test.group(1)] = {}
+ self.test_regex[is_test.group(1)][is_test.group(2)] = re.compile(value)
+ elif is_section:
+ if not is_section.group(1) in self.section_regex:
+ self.section_regex[is_section.group(1)] = {}
+ self.section_regex[is_section.group(1)][is_section.group(2)] = re.compile(value)
+ else:
+ # TODO: Make these call a traceback instead of a simple exception..
+ raise Exception("The provided argument name does not correspond to any valid type. Please give one of the following types:\nfor tests: %s\nfor sections: %s" % (self.test_argument_pattern, self.section_argument_pattern))
+
+ self.initialized = True
+
+ # Parse a line and return a tuple containing the type of result (test/section) and its category, status and name
+ def parse_line(self, line):
+ if not self.initialized:
+ raise Exception("The parser is not initialized..")
+
+ for test_category, test_status_list in self.test_regex.items():
+ for test_status, status_regex in test_status_list.items():
+ test_name = status_regex.search(line)
+ if test_name:
+ return ['test', test_category, test_status, test_name.group(1)]
+
+ for section_category, section_status_list in self.section_regex.items():
+ for section_status, status_regex in section_status_list.items():
+ section_name = status_regex.search(line)
+ if section_name:
+ return ['section', section_category, section_status, section_name.group(1)]
+ return None
+
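+# Illustrative configuration for a ptest-style log (the regexes are
+# examples):
+#   parser = Lparser(test_0_pass_regex="^PASS: (.+)",
+#                    test_0_fail_regex="^FAIL: (.+)",
+#                    test_0_skip_regex="^SKIP: (.+)")
+#   parser.init()
+#   parser.parse_line("PASS: test_foo")  # -> ['test', '0', 'pass', 'test_foo']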
+
+class Result(object):
+
+ def __init__(self):
+ self.result_dict = {}
+
+ def store(self, section, test, status):
+ if not section in self.result_dict:
+ self.result_dict[section] = []
+
+ self.result_dict[section].append((test, status))
+
+ # Sort tests by name (the first element of the tuple) within each section. This keeps the order stable, which helps when using git to diff results.
+ def sort_tests(self):
+ for package in self.result_dict:
+ sorted_results = sorted(self.result_dict[package], key=lambda tup: tup[0])
+ self.result_dict[package] = sorted_results
+
+ # Log the results as files. The file name is the section name and the contents are the tests in that section.
+ def log_as_files(self, target_dir, test_status):
+ if not isinstance(test_status, list):
+ raise Exception("test_status should be a list. Got " + str(test_status) + " instead.")
+ if not os.path.exists(target_dir):
+ raise Exception("Target directory does not exist: %s" % target_dir)
+ status_regex = re.compile('|'.join(map(str, test_status)))
+
+ for section, test_results in self.result_dict.items():
+ prefix = ''
+ for x in test_status:
+ prefix +=x+'.'
+ if section:
+ prefix += section
+ section_file = os.path.join(target_dir, prefix)
+ # purge the file contents if it exists
+ open(section_file, 'w').close()
+ for test_result in test_results:
+ (test_name, status) = test_result
+ # we log only the tests with status in the test_status list
+ match_status = status_regex.search(status)
+ if match_status:
+ ftools.append_file(section_file, status + ": " + test_name)
+
+ # Not yet implemented!
+ def log_to_lava(self):
+ pass
diff --git a/poky/meta/lib/oeqa/utils/metadata.py b/poky/meta/lib/oeqa/utils/metadata.py
new file mode 100644
index 000000000..65bbdc61f
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/metadata.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2016 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+#
+# Functions to get metadata from the testing host used
+# for analytics of test results.
+
+import os
+from collections import OrderedDict
+from collections.abc import MutableMapping
+from xml.dom.minidom import parseString
+from xml.etree.ElementTree import Element, tostring
+
+from oe.lsb import get_os_release
+from oeqa.utils.commands import runCmd, get_bb_vars
+
+
+def metadata_from_bb():
+ """ Returns test's metadata as OrderedDict.
+
+ Data will be gathered using bitbake -e thanks to get_bb_vars.
+ """
+ metadata_config_vars = ('MACHINE', 'BB_NUMBER_THREADS', 'PARALLEL_MAKE')
+
+ info_dict = OrderedDict()
+ hostname = runCmd('hostname')
+ info_dict['hostname'] = hostname.output
+ data_dict = get_bb_vars()
+
+ # Distro information
+ info_dict['distro'] = {'id': data_dict['DISTRO'],
+ 'version_id': data_dict['DISTRO_VERSION'],
+ 'pretty_name': '%s %s' % (data_dict['DISTRO'], data_dict['DISTRO_VERSION'])}
+
+ # Host distro information
+ os_release = get_os_release()
+ if os_release:
+ info_dict['host_distro'] = OrderedDict()
+ for key in ('ID', 'VERSION_ID', 'PRETTY_NAME'):
+ if key in os_release:
+ info_dict['host_distro'][key.lower()] = os_release[key]
+
+ info_dict['layers'] = get_layers(data_dict['BBLAYERS'])
+ import bb
+ info_dict['bitbake'] = git_rev_info(os.path.dirname(bb.__file__))
+
+ info_dict['config'] = OrderedDict()
+ for var in sorted(metadata_config_vars):
+ info_dict['config'][var] = data_dict[var]
+ return info_dict
+
+def metadata_from_data_store(d):
+ """ Returns test's metadata as OrderedDict.
+
+ Data will be collected from the provided data store.
+ """
+ # TODO: Getting metadata from the data store would
+ # be useful when running within bitbake.
+ pass
+
+def git_rev_info(path):
+ """Get git revision information as a dict"""
+ from git import Repo, InvalidGitRepositoryError, NoSuchPathError
+
+ info = OrderedDict()
+ try:
+ repo = Repo(path, search_parent_directories=True)
+ except (InvalidGitRepositoryError, NoSuchPathError):
+ return info
+ info['commit'] = repo.head.commit.hexsha
+ info['commit_count'] = repo.head.commit.count()
+ try:
+ info['branch'] = repo.active_branch.name
+ except TypeError:
+ info['branch'] = '(nobranch)'
+ return info
+
+def get_layers(layers):
+ """Returns layer information in dict format"""
+ layer_dict = OrderedDict()
+ for layer in layers.split():
+ layer_name = os.path.basename(layer)
+ layer_dict[layer_name] = git_rev_info(layer)
+ return layer_dict
+
+def write_metadata_file(file_path, metadata):
+ """ Writes metadata to a XML file in directory. """
+
+ xml = dict_to_XML('metadata', metadata)
+ xml_doc = parseString(tostring(xml).decode('UTF-8'))
+ with open(file_path, 'w') as f:
+ f.write(xml_doc.toprettyxml())
+
+def dict_to_XML(tag, dictionary, **kwargs):
+ """ Return XML element converting dicts recursively. """
+
+ elem = Element(tag, **kwargs)
+ for key, val in dictionary.items():
+ if tag == 'layers':
+ child = (dict_to_XML('layer', val, name=key))
+ elif isinstance(val, MutableMapping):
+ child = (dict_to_XML(key, val))
+ else:
+ if tag == 'config':
+ child = Element('variable', name=key)
+ else:
+ child = Element(key)
+ child.text = str(val)
+ elem.append(child)
+ return elem
diff --git a/poky/meta/lib/oeqa/utils/network.py b/poky/meta/lib/oeqa/utils/network.py
new file mode 100644
index 000000000..2768f6c5d
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/network.py
@@ -0,0 +1,8 @@
+import socket
+
+def get_free_port():
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind(('', 0))
+ addr = s.getsockname()
+ s.close()
+ return addr[1]
diff --git a/poky/meta/lib/oeqa/utils/package_manager.py b/poky/meta/lib/oeqa/utils/package_manager.py
new file mode 100644
index 000000000..afd5b8e75
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/package_manager.py
@@ -0,0 +1,211 @@
+import os
+import json
+import shutil
+
+from oeqa.core.utils.test import getCaseFile, getCaseMethod
+
+def get_package_manager(d, root_path):
+ """
+ Returns an OE package manager that can install packages in root_path.
+ """
+ from oe.package_manager import RpmPM, OpkgPM, DpkgPM
+
+ pkg_class = d.getVar("IMAGE_PKGTYPE")
+ if pkg_class == "rpm":
+ pm = RpmPM(d,
+ root_path,
+ d.getVar('TARGET_VENDOR'),
+ filterbydependencies=False)
+ pm.create_configs()
+
+ elif pkg_class == "ipk":
+ pm = OpkgPM(d,
+ root_path,
+ d.getVar("IPKGCONF_TARGET"),
+ d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
+
+ elif pkg_class == "deb":
+ pm = DpkgPM(d,
+ root_path,
+ d.getVar('PACKAGE_ARCHS'),
+ d.getVar('DPKG_ARCH'))
+ else:
+ raise ValueError('Unsupported IMAGE_PKGTYPE: %s' % pkg_class)
+
+ pm.write_index()
+ pm.update()
+
+ return pm
+
+def find_packages_to_extract(test_suite):
+ """
+ Returns packages to extract required by runtime tests.
+ """
+ from oeqa.core.utils.test import getSuiteCasesFiles
+
+ needed_packages = {}
+ files = getSuiteCasesFiles(test_suite)
+
+ for f in set(files):
+ json_file = _get_json_file(f)
+ if json_file:
+ needed_packages.update(_get_needed_packages(json_file))
+
+ return needed_packages
+
+def _get_json_file(module_path):
+ """
+ Returns the path of the JSON file for a module, or an empty string if it doesn't exist.
+ """
+
+ json_file = '%s.json' % module_path.rsplit('.', 1)[0]
+ if os.path.isfile(module_path) and os.path.isfile(json_file):
+ return json_file
+ else:
+ return ''
+
+def _get_needed_packages(json_file, test=None):
+ """
+ Returns a dict with needed packages based on a JSON file.
+
+ If a test is specified it will return the dict just for that test.
+ """
+ needed_packages = {}
+
+ with open(json_file) as f:
+ test_packages = json.load(f)
+ for key,value in test_packages.items():
+ needed_packages[key] = value
+
+ if test:
+ if test in needed_packages:
+ needed_packages = needed_packages[test]
+ else:
+ needed_packages = {}
+
+ return needed_packages
+
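+# Illustrative JSON layout consumed above (package and test names are
+# hypothetical):
+#   {
+#       "test_perl": {"pkg": "perl", "rm": true},
+#       "test_python": [{"pkg": "python3-core"},
+#                       {"pkg": "python3-misc", "extract": false}]
+#   }
+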
+def extract_packages(d, needed_packages):
+ """
+ Extract packages that will be needed during runtime.
+ """
+
+ import bb
+ import oe.path
+
+ extracted_path = d.getVar('TEST_EXTRACTED_DIR')
+
+ for key,value in needed_packages.items():
+ packages = ()
+ if isinstance(value, dict):
+ packages = (value, )
+ elif isinstance(value, list):
+ packages = value
+ else:
+ bb.fatal('Failed to process needed packages for %s; '
+ 'Value must be a dict or list' % key)
+
+ for package in packages:
+ pkg = package['pkg']
+ rm = package.get('rm', False)
+ extract = package.get('extract', True)
+
+ if extract:
+ #logger.debug(1, 'Extracting %s' % pkg)
+ dst_dir = os.path.join(extracted_path, pkg)
+ # Same package used for more than one test,
+ # don't need to extract again.
+ if os.path.exists(dst_dir):
+ continue
+
+ # Extract package and copy it to TEST_EXTRACTED_DIR
+ pkg_dir = _extract_in_tmpdir(d, pkg)
+ oe.path.copytree(pkg_dir, dst_dir)
+ shutil.rmtree(pkg_dir)
+
+ else:
+ #logger.debug(1, 'Copying %s' % pkg)
+ _copy_package(d, pkg)
+
+def _extract_in_tmpdir(d, pkg):
+ """"
+ Returns path to a temp directory where the package was
+ extracted without dependencies.
+ """
+
+ from oeqa.utils.package_manager import get_package_manager
+
+ pkg_path = os.path.join(d.getVar('TEST_INSTALL_TMP_DIR'), pkg)
+ pm = get_package_manager(d, pkg_path)
+ extract_dir = pm.extract(pkg)
+ shutil.rmtree(pkg_path)
+
+ return extract_dir
+
+def _copy_package(d, pkg):
+ """
+    Copies the RPM, DEB or IPK package to dst_dir (TEST_PACKAGED_DIR).
+ """
+
+ from oeqa.utils.package_manager import get_package_manager
+
+ pkg_path = os.path.join(d.getVar('TEST_INSTALL_TMP_DIR'), pkg)
+ dst_dir = d.getVar('TEST_PACKAGED_DIR')
+ pm = get_package_manager(d, pkg_path)
+ pkg_info = pm.package_info(pkg)
+ file_path = pkg_info[pkg]['filepath']
+ shutil.copy2(file_path, dst_dir)
+ shutil.rmtree(pkg_path)
+
+def install_package(test_case):
+ """
+    Installs a package on the DUT if required.
+ """
+ needed_packages = test_needs_package(test_case)
+ if needed_packages:
+ _install_uninstall_packages(needed_packages, test_case, True)
+
+def uninstall_package(test_case):
+ """
+    Uninstalls a package from the DUT if required.
+ """
+ needed_packages = test_needs_package(test_case)
+ if needed_packages:
+ _install_uninstall_packages(needed_packages, test_case, False)
+
+def test_needs_package(test_case):
+ """
+    Checks whether a test case requires packages to be installed/uninstalled.
+ """
+ test_file = getCaseFile(test_case)
+ json_file = _get_json_file(test_file)
+
+ if json_file:
+ test_method = getCaseMethod(test_case)
+ needed_packages = _get_needed_packages(json_file, test_method)
+ if needed_packages:
+ return needed_packages
+
+ return None
+
+def _install_uninstall_packages(needed_packages, test_case, install=True):
+ """
+    Installs/uninstalls packages on the DUT without using a package manager.
+ """
+
+ if isinstance(needed_packages, dict):
+ packages = [needed_packages]
+ elif isinstance(needed_packages, list):
+ packages = needed_packages
+
+ for package in packages:
+ pkg = package['pkg']
+ rm = package.get('rm', False)
+ extract = package.get('extract', True)
+ src_dir = os.path.join(test_case.tc.extract_dir, pkg)
+
+ # Install package
+ if install and extract:
+ test_case.tc.target.copyDirTo(src_dir, '/')
+
+ # Uninstall package
+ elif not install and rm:
+ test_case.tc.target.deleteDirStructure(src_dir, '/')
diff --git a/poky/meta/lib/oeqa/utils/qemurunner.py b/poky/meta/lib/oeqa/utils/qemurunner.py
new file mode 100644
index 000000000..c962602a6
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/qemurunner.py
@@ -0,0 +1,591 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# This module provides a class for starting qemu images using runqemu.
+# It's used by testimage.bbclass.
+
+import subprocess
+import os
+import sys
+import time
+import signal
+import re
+import socket
+import select
+import errno
+import string
+import threading
+import codecs
+import logging
+from oeqa.utils.dump import HostDumper
+
+# Get Unicode non-printable control chars
+control_range = list(range(0,32))+list(range(127,160))
+control_chars = [chr(x) for x in control_range
+ if chr(x) not in string.printable]
+re_control_char = re.compile('[%s]' % re.escape("".join(control_chars)))
+
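+# Illustrative effect of the filter above: control characters such as ESC
+# (0x1b) and BEL (0x07) are stripped while printable text is kept, e.g.
+#   re_control_char.sub('', 'login:\x1b[0m\x07') -> 'login:[0m'
+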
+class QemuRunner:
+
+ def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, boottime, dump_dir, dump_host_cmds, use_kvm, logger):
+
+ # Popen object for runqemu
+ self.runqemu = None
+ # pid of the qemu process that runqemu will start
+ self.qemupid = None
+ # target ip - from the command line or runqemu output
+ self.ip = None
+ # host ip - where qemu is running
+ self.server_ip = None
+ # target ip netmask
+ self.netmask = None
+
+ self.machine = machine
+ self.rootfs = rootfs
+ self.display = display
+ self.tmpdir = tmpdir
+ self.deploy_dir_image = deploy_dir_image
+ self.logfile = logfile
+ self.boottime = boottime
+ self.logged = False
+ self.thread = None
+ self.use_kvm = use_kvm
+ self.msg = ''
+
+ self.runqemutime = 120
+ self.qemu_pidfile = 'pidfile_'+str(os.getpid())
+ self.host_dumper = HostDumper(dump_host_cmds, dump_dir)
+
+ self.logger = logger
+
+ def create_socket(self):
+ try:
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setblocking(0)
+ sock.bind(("127.0.0.1",0))
+ sock.listen(2)
+ port = sock.getsockname()[1]
+ self.logger.debug("Created listening socket for qemu serial console on: 127.0.0.1:%s" % port)
+ return (sock, port)
+
+ except socket.error:
+ sock.close()
+ raise
+
+ def log(self, msg):
+ if self.logfile:
+            # The data received from qemu needs to be sanitized
+            # because it may contain control characters
+ msg = msg.decode("utf-8", errors='ignore')
+ msg = re_control_char.sub('', msg)
+ self.msg += msg
+ with codecs.open(self.logfile, "a", encoding="utf-8") as f:
+ f.write("%s" % msg)
+
+ def getOutput(self, o):
+ import fcntl
+ fl = fcntl.fcntl(o, fcntl.F_GETFL)
+ fcntl.fcntl(o, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+ return os.read(o.fileno(), 1000000).decode("utf-8")
+
+
+ def handleSIGCHLD(self, signum, frame):
+ if self.runqemu and self.runqemu.poll():
+ if self.runqemu.returncode:
+ self.logger.debug('runqemu exited with code %d' % self.runqemu.returncode)
+ self.logger.debug("Output from runqemu:\n%s" % self.getOutput(self.runqemu.stdout))
+ self.stop()
+ self._dump_host()
+ raise SystemExit
+
+ def start(self, qemuparams = None, get_ip = True, extra_bootparams = None, runqemuparams='', launch_cmd=None, discard_writes=True):
+ env = os.environ.copy()
+ if self.display:
+ env["DISPLAY"] = self.display
+ # Set this flag so that Qemu doesn't do any grabs as SDL grabs
+ # interact badly with screensavers.
+ env["QEMU_DONT_GRAB"] = "1"
+ if not os.path.exists(self.rootfs):
+ self.logger.error("Invalid rootfs %s" % self.rootfs)
+ return False
+ if not os.path.exists(self.tmpdir):
+ self.logger.error("Invalid TMPDIR path %s" % self.tmpdir)
+ return False
+ else:
+ env["OE_TMPDIR"] = self.tmpdir
+ if not os.path.exists(self.deploy_dir_image):
+ self.logger.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
+ return False
+ else:
+ env["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
+
+ if not launch_cmd:
+ launch_cmd = 'runqemu %s %s ' % ('snapshot' if discard_writes else '', runqemuparams)
+ if self.use_kvm:
+ self.logger.debug('Using kvm for runqemu')
+ launch_cmd += ' kvm'
+ else:
+ self.logger.debug('Not using kvm for runqemu')
+ if not self.display:
+ launch_cmd += ' nographic'
+ launch_cmd += ' %s %s' % (self.machine, self.rootfs)
+
+ return self.launch(launch_cmd, qemuparams=qemuparams, get_ip=get_ip, extra_bootparams=extra_bootparams, env=env)
+
+ def launch(self, launch_cmd, get_ip = True, qemuparams = None, extra_bootparams = None, env = None):
+ try:
+ threadsock, threadport = self.create_socket()
+ self.server_socket, self.serverport = self.create_socket()
+ except socket.error as msg:
+ self.logger.error("Failed to create listening socket: %s" % msg[1])
+ return False
+
+ bootparams = 'console=tty1 console=ttyS0,115200n8 printk.time=1'
+ if extra_bootparams:
+ bootparams = bootparams + ' ' + extra_bootparams
+
+        # Ask QEMU to store its process PID in a file; this way we don't have to parse running processes
+        # and analyze descendants in order to determine it.
+ if os.path.exists(self.qemu_pidfile):
+ os.remove(self.qemu_pidfile)
+ self.qemuparams = 'bootparams="{0}" qemuparams="-serial tcp:127.0.0.1:{1} -pidfile {2}"'.format(bootparams, threadport, self.qemu_pidfile)
+ if qemuparams:
+ self.qemuparams = self.qemuparams[:-1] + " " + qemuparams + " " + '\"'
+
+ launch_cmd += ' tcpserial=%s %s' % (self.serverport, self.qemuparams)
+
+ self.origchldhandler = signal.getsignal(signal.SIGCHLD)
+ signal.signal(signal.SIGCHLD, self.handleSIGCHLD)
+
+ self.logger.debug('launchcmd=%s'%(launch_cmd))
+
+ # FIXME: We pass in stdin=subprocess.PIPE here to work around stty
+ # blocking at the end of the runqemu script when using this within
+ # oe-selftest (this makes stty error out immediately). There ought
+ # to be a proper fix but this will suffice for now.
+ self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, preexec_fn=os.setpgrp, env=env)
+ output = self.runqemu.stdout
+
+ #
+ # We need the preexec_fn above so that all runqemu processes can easily be killed
+ # (by killing their process group). This presents a problem if this controlling
+ # process itself is killed however since those processes don't notice the death
+ # of the parent and merrily continue on.
+ #
+ # Rather than hack runqemu to deal with this, we add something here instead.
+ # Basically we fork off another process which holds an open pipe to the parent
+        # and also is setpgrp. If/when the pipe sees EOF from the parent dying, it kills
+        # the process group. This is like prctl's PDEATHSIG but for a process group
+ # rather than a single process.
+ #
+ r, w = os.pipe()
+ self.monitorpid = os.fork()
+ if self.monitorpid:
+ os.close(r)
+ self.monitorpipe = os.fdopen(w, "w")
+ else:
+ # child process
+ os.setpgrp()
+ os.close(w)
+ r = os.fdopen(r)
+ x = r.read()
+ os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
+ sys.exit(0)
+
+ self.logger.debug("runqemu started, pid is %s" % self.runqemu.pid)
+ self.logger.debug("waiting at most %s seconds for qemu pid (%s)" %
+ (self.runqemutime, time.strftime("%D %H:%M:%S")))
+ endtime = time.time() + self.runqemutime
+ while not self.is_alive() and time.time() < endtime:
+ if self.runqemu.poll():
+ if self.runqemu.returncode:
+ # No point waiting any longer
+ self.logger.debug('runqemu exited with code %d' % self.runqemu.returncode)
+ self._dump_host()
+ self.stop()
+ self.logger.debug("Output from runqemu:\n%s" % self.getOutput(output))
+ return False
+ time.sleep(0.5)
+
+ if not self.is_alive():
+ self.logger.error("Qemu pid didn't appear in %s seconds (%s)" %
+ (self.runqemutime, time.strftime("%D %H:%M:%S")))
+ # Dump all processes to help us to figure out what is going on...
+ ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command '], stdout=subprocess.PIPE).communicate()[0]
+ processes = ps.decode("utf-8")
+ self.logger.debug("Running processes:\n%s" % processes)
+ self._dump_host()
+ self.stop()
+ op = self.getOutput(output)
+ if op:
+ self.logger.error("Output from runqemu:\n%s" % op)
+ else:
+ self.logger.error("No output from runqemu.\n")
+ return False
+
+ # We are alive: qemu is running
+ out = self.getOutput(output)
+ netconf = False # network configuration is not required by default
+ self.logger.debug("qemu started in %s seconds - qemu procces pid is %s (%s)" %
+ (time.time() - (endtime - self.runqemutime),
+ self.qemupid, time.strftime("%D %H:%M:%S")))
+ if get_ip:
+ cmdline = ''
+ with open('/proc/%s/cmdline' % self.qemupid) as p:
+ cmdline = p.read()
+            # The data received needs to be sanitized
+            # because it may contain control characters
+ cmdline = re_control_char.sub(' ', cmdline)
+ try:
+ ips = re.findall("((?:[0-9]{1,3}\.){3}[0-9]{1,3})", cmdline.split("ip=")[1])
+ self.ip = ips[0]
+ self.server_ip = ips[1]
+ self.logger.debug("qemu cmdline used:\n{}".format(cmdline))
+ except (IndexError, ValueError):
+ # Try to get network configuration from runqemu output
+ match = re.match('.*Network configuration: ([0-9.]+)::([0-9.]+):([0-9.]+)$.*',
+ out, re.MULTILINE|re.DOTALL)
+ if match:
+ self.ip, self.server_ip, self.netmask = match.groups()
+                    # Network configuration is required: since we couldn't get
+                    # the IP from the kernel command line, qemu is not booting
+                    # the kernel directly and guest networking has not been
+                    # configured automatically
+ netconf = True
+ else:
+ self.logger.error("Couldn't get ip from qemu command line and runqemu output! "
+ "Here is the qemu command line used:\n%s\n"
+ "and output from runqemu:\n%s" % (cmdline, out))
+ self._dump_host()
+ self.stop()
+ return False
+
+ self.logger.debug("Target IP: %s" % self.ip)
+ self.logger.debug("Server IP: %s" % self.server_ip)
+
+ self.thread = LoggingThread(self.log, threadsock, self.logger)
+ self.thread.start()
+ if not self.thread.connection_established.wait(self.boottime):
+ self.logger.error("Didn't receive a console connection from qemu. "
+ "Here is the qemu command line used:\n%s\nand "
+ "output from runqemu:\n%s" % (cmdline, out))
+ self.stop_thread()
+ return False
+
+ self.logger.debug("Output from runqemu:\n%s", out)
+ self.logger.debug("Waiting at most %d seconds for login banner (%s)" %
+ (self.boottime, time.strftime("%D %H:%M:%S")))
+ endtime = time.time() + self.boottime
+ socklist = [self.server_socket]
+ reachedlogin = False
+ stopread = False
+ qemusock = None
+ bootlog = b''
+ data = b''
+ while time.time() < endtime and not stopread:
+ try:
+ sread, swrite, serror = select.select(socklist, [], [], 5)
+ except InterruptedError:
+ continue
+ for sock in sread:
+ if sock is self.server_socket:
+ qemusock, addr = self.server_socket.accept()
+ qemusock.setblocking(0)
+ socklist.append(qemusock)
+ socklist.remove(self.server_socket)
+ self.logger.debug("Connection from %s:%s" % addr)
+ else:
+ data = data + sock.recv(1024)
+ if data:
+ bootlog += data
+ data = b''
+ if b' login:' in bootlog:
+ self.server_socket = qemusock
+ stopread = True
+ reachedlogin = True
+ self.logger.debug("Reached login banner in %s seconds (%s)" %
+ (time.time() - (endtime - self.boottime),
+ time.strftime("%D %H:%M:%S")))
+ else:
+ # no need to check if reachedlogin unless we support multiple connections
+ self.logger.debug("QEMU socket disconnected before login banner reached. (%s)" %
+ time.strftime("%D %H:%M:%S"))
+ socklist.remove(sock)
+ sock.close()
+ stopread = True
+
+
+ if not reachedlogin:
+ if time.time() >= endtime:
+ self.logger.debug("Target didn't reach login banner in %d seconds (%s)" %
+ (self.boottime, time.strftime("%D %H:%M:%S")))
+ tail = lambda l: "\n".join(l.splitlines()[-25:])
+                # if bootlog is empty, use the tail of the qemu log stored in self.msg
+ lines = tail(bootlog if bootlog else self.msg)
+ self.logger.debug("Last 25 lines of text:\n%s" % lines)
+ self.logger.debug("Check full boot log: %s" % self.logfile)
+ self._dump_host()
+ self.stop()
+ return False
+
+        # Even if we are not able to log in, the tests can continue
+ try:
+ (status, output) = self.run_serial("root\n", raw=True)
+ if re.search("root@[a-zA-Z0-9\-]+:~#", output):
+ self.logged = True
+ self.logger.debug("Logged as root in serial console")
+ if netconf:
+ # configure guest networking
+ cmd = "ifconfig eth0 %s netmask %s up\n" % (self.ip, self.netmask)
+ output = self.run_serial(cmd, raw=True)[1]
+ if re.search("root@[a-zA-Z0-9\-]+:~#", output):
+ self.logger.debug("configured ip address %s", self.ip)
+ else:
+ self.logger.debug("Couldn't configure guest networking")
+ else:
+ self.logger.debug("Couldn't login into serial console"
+ " as root using blank password")
+ except:
+ self.logger.debug("Serial console failed while trying to login")
+ return True
+
+ def stop(self):
+ self.stop_thread()
+ self.stop_qemu_system()
+ if hasattr(self, "origchldhandler"):
+ signal.signal(signal.SIGCHLD, self.origchldhandler)
+ if self.runqemu:
+ if hasattr(self, "monitorpid"):
+ os.kill(self.monitorpid, signal.SIGKILL)
+ self.logger.debug("Sending SIGTERM to runqemu")
+ try:
+ os.killpg(os.getpgid(self.runqemu.pid), signal.SIGTERM)
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise
+ endtime = time.time() + self.runqemutime
+ while self.runqemu.poll() is None and time.time() < endtime:
+ time.sleep(1)
+ if self.runqemu.poll() is None:
+ self.logger.debug("Sending SIGKILL to runqemu")
+ os.killpg(os.getpgid(self.runqemu.pid), signal.SIGKILL)
+ self.runqemu = None
+ if hasattr(self, 'server_socket') and self.server_socket:
+ self.server_socket.close()
+ self.server_socket = None
+ self.qemupid = None
+ self.ip = None
+ if os.path.exists(self.qemu_pidfile):
+ os.remove(self.qemu_pidfile)
+
+ def stop_qemu_system(self):
+ if self.qemupid:
+ try:
+ # qemu-system behaves well and a SIGTERM is enough
+ os.kill(self.qemupid, signal.SIGTERM)
+ except ProcessLookupError as e:
+ self.logger.warn('qemu-system ended unexpectedly')
+
+ def stop_thread(self):
+ if self.thread and self.thread.is_alive():
+ self.thread.stop()
+ self.thread.join()
+
+ def restart(self, qemuparams = None):
+ self.logger.debug("Restarting qemu process")
+ if self.runqemu.poll() is None:
+ self.stop()
+ if self.start(qemuparams):
+ return True
+ return False
+
+ def is_alive(self):
+ if not self.runqemu:
+ return False
+ if os.path.isfile(self.qemu_pidfile):
+ f = open(self.qemu_pidfile, 'r')
+ qemu_pid = f.read()
+ f.close()
+ qemupid = int(qemu_pid)
+ if os.path.exists("/proc/" + str(qemupid)):
+ self.qemupid = qemupid
+ return True
+ return False
+
+ def run_serial(self, command, raw=False, timeout=5):
+        # We assume the target system has echo, which we use to get the command status
+ if not raw:
+ command = "%s; echo $?\n" % command
+
+ data = ''
+ status = 0
+ self.server_socket.sendall(command.encode('utf-8'))
+ start = time.time()
+ end = start + timeout
+ while True:
+ now = time.time()
+ if now >= end:
+ data += "<<< run_serial(): command timed out after %d seconds without output >>>\r\n\r\n" % timeout
+ break
+ try:
+ sread, _, _ = select.select([self.server_socket],[],[], end - now)
+ except InterruptedError:
+ continue
+ if sread:
+ answer = self.server_socket.recv(1024)
+ if answer:
+ data += answer.decode('utf-8')
+                    # Search for the prompt to stop
+ if re.search("[a-zA-Z0-9]+@[a-zA-Z0-9\-]+:~#", data):
+ break
+ else:
+ raise Exception("No data on serial console socket")
+
+ if data:
+ if raw:
+ status = 1
+ else:
+ # Remove first line (command line) and last line (prompt)
+ data = data[data.find('$?\r\n')+4:data.rfind('\r\n')]
+ index = data.rfind('\r\n')
+ if index == -1:
+ status_cmd = data
+ data = ""
+ else:
+ status_cmd = data[index+2:]
+ data = data[:index]
+ if (status_cmd == "0"):
+ status = 1
+ return (status, str(data))
+
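+    # Usage sketch (the command is hypothetical): with raw=False the
+    # returned status is 1 on success, i.e. when the appended 'echo $?'
+    # printed 0:
+    #
+    #   status, output = runner.run_serial('uname -r')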
+
+ def _dump_host(self):
+ self.host_dumper.create_dir("qemu")
+ self.logger.warn("Qemu ended unexpectedly, dump data from host"
+ " is in %s" % self.host_dumper.dump_dir)
+ self.host_dumper.dump_host()
+
+# This class is for reading data from a socket and passing it to logfunc
+# to be processed. It's completely event driven and has a straightforward
+# event loop. The mechanism for stopping the thread is a simple pipe which
+# will wake up the poll and allow for tearing everything down.
+class LoggingThread(threading.Thread):
+ def __init__(self, logfunc, sock, logger):
+ self.connection_established = threading.Event()
+ self.serversock = sock
+ self.logfunc = logfunc
+ self.logger = logger
+ self.readsock = None
+ self.running = False
+
+ self.errorevents = select.POLLERR | select.POLLHUP | select.POLLNVAL
+ self.readevents = select.POLLIN | select.POLLPRI
+
+ threading.Thread.__init__(self, target=self.threadtarget)
+
+ def threadtarget(self):
+ try:
+ self.eventloop()
+ finally:
+ self.teardown()
+
+ def run(self):
+ self.logger.debug("Starting logging thread")
+ self.readpipe, self.writepipe = os.pipe()
+ threading.Thread.run(self)
+
+ def stop(self):
+ self.logger.debug("Stopping logging thread")
+ if self.running:
+ os.write(self.writepipe, bytes("stop", "utf-8"))
+
+ def teardown(self):
+ self.logger.debug("Tearing down logging thread")
+ self.close_socket(self.serversock)
+
+ if self.readsock is not None:
+ self.close_socket(self.readsock)
+
+ self.close_ignore_error(self.readpipe)
+ self.close_ignore_error(self.writepipe)
+ self.running = False
+
+ def eventloop(self):
+ poll = select.poll()
+ event_read_mask = self.errorevents | self.readevents
+ poll.register(self.serversock.fileno())
+ poll.register(self.readpipe, event_read_mask)
+
+ breakout = False
+ self.running = True
+ self.logger.debug("Starting thread event loop")
+ while not breakout:
+ events = poll.poll()
+ for event in events:
+ # An error occurred, bail out
+ if event[1] & self.errorevents:
+ raise Exception(self.stringify_event(event[1]))
+
+ # Event to stop the thread
+ if self.readpipe == event[0]:
+ self.logger.debug("Stop event received")
+ breakout = True
+ break
+
+ # A connection request was received
+ elif self.serversock.fileno() == event[0]:
+ self.logger.debug("Connection request received")
+ self.readsock, _ = self.serversock.accept()
+ self.readsock.setblocking(0)
+ poll.unregister(self.serversock.fileno())
+ poll.register(self.readsock.fileno(), event_read_mask)
+
+ self.logger.debug("Setting connection established event")
+ self.connection_established.set()
+
+ # Actual data to be logged
+ elif self.readsock.fileno() == event[0]:
+ data = self.recv(1024)
+ self.logfunc(data)
+
+ # Since the socket is non-blocking make sure to honor EAGAIN
+ # and EWOULDBLOCK.
+ def recv(self, count):
+ try:
+ data = self.readsock.recv(count)
+ except socket.error as e:
+ if e.errno == errno.EAGAIN or e.errno == errno.EWOULDBLOCK:
+ return ''
+ else:
+ raise
+
+ if data is None:
+ raise Exception("No data on read ready socket")
+ elif not data:
+ # This actually means an orderly shutdown
+ # happened. But for this code it counts as an
+ # error since the connection shouldn't go away
+ # until qemu exits.
+ raise Exception("Console connection closed unexpectedly")
+
+ return data
+
+ def stringify_event(self, event):
+ val = ''
+ if select.POLLERR == event:
+            val = 'POLLERR'
+ elif select.POLLHUP == event:
+ val = 'POLLHUP'
+ elif select.POLLNVAL == event:
+ val = 'POLLNVAL'
+ return val
+
+ def close_socket(self, sock):
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
+
+ def close_ignore_error(self, fd):
+ try:
+ os.close(fd)
+ except OSError:
+ pass
diff --git a/poky/meta/lib/oeqa/utils/qemutinyrunner.py b/poky/meta/lib/oeqa/utils/qemutinyrunner.py
new file mode 100644
index 000000000..63b5d1648
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/qemutinyrunner.py
@@ -0,0 +1,176 @@
+# Copyright (C) 2015 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# This module provides a class for starting qemu images of poky tiny.
+# It's used by testimage.bbclass.
+
+import subprocess
+import os
+import time
+import signal
+import re
+import socket
+import select
+import bb
+from .qemurunner import QemuRunner
+
+class QemuTinyRunner(QemuRunner):
+
+ def __init__(self, machine, rootfs, display, tmpdir, deploy_dir_image, logfile, kernel, boottime, logger):
+
+ # Popen object for runqemu
+ self.runqemu = None
+ # pid of the qemu process that runqemu will start
+ self.qemupid = None
+ # target ip - from the command line
+ self.ip = None
+ # host ip - where qemu is running
+ self.server_ip = None
+
+ self.machine = machine
+ self.rootfs = rootfs
+ self.display = display
+ self.tmpdir = tmpdir
+ self.deploy_dir_image = deploy_dir_image
+ self.logfile = logfile
+ self.boottime = boottime
+
+ self.runqemutime = 60
+ self.socketfile = "console.sock"
+ self.server_socket = None
+ self.kernel = kernel
+ self.logger = logger
+
+
+ def create_socket(self):
+        # Retry a few times; bb.fatal() terminates the process, so it is
+        # only called once all attempts have failed.
+        tries = 3
+        while tries > 0:
+            try:
+                self.server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+                self.server_socket.connect(self.socketfile)
+                bb.note("Connected to the qemu serial console socket.")
+                return
+            except socket.error:
+                self.server_socket.close()
+                tries -= 1
+        bb.fatal("Failed to connect to the qemu serial console socket.")
+
+ def log(self, msg):
+ if self.logfile:
+ with open(self.logfile, "a") as f:
+ f.write("%s" % msg)
+
+ def start(self, qemuparams = None, ssh=True, extra_bootparams=None, runqemuparams='', discard_writes=True):
+
+ if self.display:
+ os.environ["DISPLAY"] = self.display
+ else:
+ bb.error("To start qemu I need a X desktop, please set DISPLAY correctly (e.g. DISPLAY=:1)")
+ return False
+ if not os.path.exists(self.rootfs):
+ bb.error("Invalid rootfs %s" % self.rootfs)
+ return False
+ if not os.path.exists(self.tmpdir):
+ bb.error("Invalid TMPDIR path %s" % self.tmpdir)
+ return False
+ else:
+ os.environ["OE_TMPDIR"] = self.tmpdir
+ if not os.path.exists(self.deploy_dir_image):
+ bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
+ return False
+ else:
+ os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image
+
+ # Set this flag so that Qemu doesn't do any grabs as SDL grabs interact
+ # badly with screensavers.
+ os.environ["QEMU_DONT_GRAB"] = "1"
+ self.qemuparams = '--append "root=/dev/ram0 console=ttyS0" -nographic -serial unix:%s,server,nowait' % self.socketfile
+
+ launch_cmd = 'qemu-system-i386 -kernel %s -initrd %s %s' % (self.kernel, self.rootfs, self.qemuparams)
+        self.runqemu = subprocess.Popen(launch_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, preexec_fn=os.setpgrp)
+
+ bb.note("runqemu started, pid is %s" % self.runqemu.pid)
+ bb.note("waiting at most %s seconds for qemu pid" % self.runqemutime)
+ endtime = time.time() + self.runqemutime
+ while not self.is_alive() and time.time() < endtime:
+ time.sleep(1)
+
+ if self.is_alive():
+ bb.note("qemu started - qemu procces pid is %s" % self.qemupid)
+ self.create_socket()
+ else:
+ bb.note("Qemu pid didn't appeared in %s seconds" % self.runqemutime)
+ output = self.runqemu.stdout
+ self.stop()
+ bb.note("Output from runqemu:\n%s" % output.read().decode("utf-8"))
+ return False
+
+ return self.is_alive()
+
+ def run_serial(self, command, timeout=5):
+        self.server_socket.sendall((command + '\n').encode('utf-8'))
+ data = ''
+ status = 0
+ stopread = False
+ endtime = time.time()+timeout
+ while time.time()<endtime and not stopread:
+ try:
+ sread, _, _ = select.select([self.server_socket],[],[],1)
+ except InterruptedError:
+ continue
+ for sock in sread:
+ answer = sock.recv(1024)
+ if answer:
+                    data += answer.decode('utf-8')
+ else:
+ sock.close()
+ stopread = True
+ if not data:
+ status = 1
+ if not stopread:
+ data += "<<< run_serial(): command timed out after %d seconds without output >>>\r\n\r\n" % timeout
+ return (status, str(data))
+
+ def find_child(self,parent_pid):
+ #
+        # Walk the process tree from the specified process looking for a qemu-system. Return its [pid, cmd]
+ #
+ ps = subprocess.Popen(['ps', 'axww', '-o', 'pid,ppid,command'], stdout=subprocess.PIPE).communicate()[0]
+ processes = ps.decode("utf-8").split('\n')
+ nfields = len(processes[0].split()) - 1
+ pids = {}
+ commands = {}
+ for row in processes[1:]:
+ data = row.split(None, nfields)
+ if len(data) != 3:
+ continue
+ if data[1] not in pids:
+ pids[data[1]] = []
+
+ pids[data[1]].append(data[0])
+ commands[data[0]] = data[2]
+
+ if parent_pid not in pids:
+ return []
+
+ parents = []
+ newparents = pids[parent_pid]
+ while newparents:
+ next = []
+ for p in newparents:
+ if p in pids:
+ for n in pids[p]:
+ if n not in parents and n not in next:
+ next.append(n)
+ if p not in parents:
+ parents.append(p)
+ newparents = next
+ #print("Children matching %s:" % str(parents))
+ for p in parents:
+ # Need to be careful here since runqemu runs "ldd qemu-system-xxxx"
+ # Also, old versions of ldd (2.11) run "LD_XXXX qemu-system-xxxx"
+ basecmd = commands[p].split()[0]
+ basecmd = os.path.basename(basecmd)
+ if "qemu-system" in basecmd and "-serial unix" in commands[p]:
+ return [int(p),commands[p]]
diff --git a/poky/meta/lib/oeqa/utils/sshcontrol.py b/poky/meta/lib/oeqa/utils/sshcontrol.py
new file mode 100644
index 000000000..d292893c0
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/sshcontrol.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Provides a class for setting up ssh connections,
+# running commands and copying files to/from a target.
+# It's used by testimage.bbclass and tests in lib/oeqa/runtime.
+
+import subprocess
+import time
+import os
+import select
+
+
+class SSHProcess(object):
+ def __init__(self, **options):
+
+ self.defaultopts = {
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.STDOUT,
+ "stdin": None,
+ "shell": False,
+ "bufsize": -1,
+ "preexec_fn": os.setsid,
+ }
+ self.options = dict(self.defaultopts)
+ self.options.update(options)
+ self.status = None
+ self.output = None
+ self.process = None
+ self.starttime = None
+ self.logfile = None
+
+ # Unset DISPLAY which means we won't trigger SSH_ASKPASS
+ env = os.environ.copy()
+ if "DISPLAY" in env:
+ del env['DISPLAY']
+ self.options['env'] = env
+
+ def log(self, msg):
+ if self.logfile:
+ with open(self.logfile, "a") as f:
+ f.write("%s" % msg)
+
+ def _run(self, command, timeout=None, logfile=None):
+ self.logfile = logfile
+ self.starttime = time.time()
+ output = ''
+ self.process = subprocess.Popen(command, **self.options)
+ if timeout:
+ endtime = self.starttime + timeout
+ eof = False
+ while time.time() < endtime and not eof:
+ try:
+ if select.select([self.process.stdout], [], [], 5)[0] != []:
+ data = os.read(self.process.stdout.fileno(), 1024)
+ if not data:
+ self.process.stdout.close()
+ eof = True
+ else:
+ data = data.decode("utf-8")
+ output += data
+ self.log(data)
+ endtime = time.time() + timeout
+ except InterruptedError:
+ continue
+
+ # process hasn't returned yet
+ if not eof:
+ self.process.terminate()
+ time.sleep(5)
+ try:
+ self.process.kill()
+ except OSError:
+ pass
+ lastline = "\nProcess killed - no output for %d seconds. Total running time: %d seconds." % (timeout, time.time() - self.starttime)
+ self.log(lastline)
+ output += lastline
+ else:
+ output = self.process.communicate()[0]
+ self.log(output.rstrip())
+
+ self.status = self.process.wait()
+ self.output = output.rstrip()
+
+ def run(self, command, timeout=None, logfile=None):
+ try:
+ self._run(command, timeout, logfile)
+ except:
+            # Need to guard against a SystemExit or other exception occurring whilst running
+ # and ensure we don't leave a process behind.
+ if self.process.poll() is None:
+ self.process.kill()
+ self.status = self.process.wait()
+ raise
+ return (self.status, self.output)
+
+class SSHControl(object):
+ def __init__(self, ip, logfile=None, timeout=300, user='root', port=None):
+ self.ip = ip
+ self.defaulttimeout = timeout
+ self.ignore_status = True
+ self.logfile = logfile
+ self.user = user
+ self.ssh_options = [
+ '-o', 'UserKnownHostsFile=/dev/null',
+ '-o', 'StrictHostKeyChecking=no',
+ '-o', 'LogLevel=ERROR'
+ ]
+ self.ssh = ['ssh', '-l', self.user ] + self.ssh_options
+ self.scp = ['scp'] + self.ssh_options
+ if port:
+ self.ssh = self.ssh + [ '-p', port ]
+ self.scp = self.scp + [ '-P', port ]
+
+ def log(self, msg):
+ if self.logfile:
+ with open(self.logfile, "a") as f:
+ f.write("%s\n" % msg)
+
+ def _internal_run(self, command, timeout=None, ignore_status = True):
+ self.log("[Running]$ %s" % " ".join(command))
+
+ proc = SSHProcess()
+ status, output = proc.run(command, timeout, logfile=self.logfile)
+
+ self.log("[Command returned '%d' after %.2f seconds]" % (status, time.time() - proc.starttime))
+
+ if status and not ignore_status:
+ raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, status, output))
+
+ return (status, output)
+
+ def run(self, command, timeout=None):
+ """
+ command - ssh command to run
+ timeout=<val> - kill command if there is no output after <val> seconds
+        timeout=None - kill command if there is no output after a default number of seconds
+ timeout=0 - no timeout, let command run until it returns
+ """
+
+ command = self.ssh + [self.ip, 'export PATH=/usr/sbin:/sbin:/usr/bin:/bin; ' + command]
+
+ if timeout is None:
+ return self._internal_run(command, self.defaulttimeout, self.ignore_status)
+ if timeout == 0:
+ return self._internal_run(command, None, self.ignore_status)
+ return self._internal_run(command, timeout, self.ignore_status)
+
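+    # Usage sketch (the IP and command are hypothetical):
+    #
+    #   ssh = SSHControl('192.168.7.2', logfile='/tmp/ssh.log')
+    #   status, output = ssh.run('uname -a', timeout=60)
+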
+ def copy_to(self, localpath, remotepath):
+ if os.path.islink(localpath):
+ localpath = os.path.dirname(localpath) + "/" + os.readlink(localpath)
+ command = self.scp + [localpath, '%s@%s:%s' % (self.user, self.ip, remotepath)]
+ return self._internal_run(command, ignore_status=False)
+
+ def copy_from(self, remotepath, localpath):
+ command = self.scp + ['%s@%s:%s' % (self.user, self.ip, remotepath), localpath]
+ return self._internal_run(command, ignore_status=False)
+
+ def copy_dir_to(self, localpath, remotepath):
+ """
+        Recursively copy the localpath directory to remotepath on the target.
+ """
+
+ for root, dirs, files in os.walk(localpath):
+ # Create directories in the target as needed
+ for d in dirs:
+ tmp_dir = os.path.join(root, d).replace(localpath, "")
+ new_dir = os.path.join(remotepath, tmp_dir.lstrip("/"))
+ cmd = "mkdir -p %s" % new_dir
+ self.run(cmd)
+
+ # Copy files into the target
+ for f in files:
+ tmp_file = os.path.join(root, f).replace(localpath, "")
+ dst_file = os.path.join(remotepath, tmp_file.lstrip("/"))
+ src_file = os.path.join(root, f)
+ self.copy_to(src_file, dst_file)
+
+
+ def delete_files(self, remotepath, files):
+ """
+ Delete files in target's remote path.
+ """
+
+ cmd = "rm"
+ if not isinstance(files, list):
+ files = [files]
+
+ for f in files:
+ cmd = "%s %s" % (cmd, os.path.join(remotepath, f))
+
+ self.run(cmd)
+
+
+ def delete_dir(self, remotepath):
+ """
+ Delete remotepath directory in target.
+ """
+
+ cmd = "rmdir %s" % remotepath
+ self.run(cmd)
+
+
+ def delete_dir_structure(self, localpath, remotepath):
+ """
+        Recursively delete the localpath directory structure in the target's
+        remotepath.
+
+        This function is very useful for deleting a package that is installed
+        on the DUT when the host running the test has that package extracted
+        in a tmp directory.
+
+ Example:
+ pwd: /home/user/tmp
+ tree: .
+ └── work
+ ├── dir1
+ │   └── file1
+ └── dir2
+
+ localpath = "/home/user/tmp" and remotepath = "/home/user"
+
+ With the above variables this function will try to delete the
+ directory in the DUT in this order:
+ /home/user/work/dir1/file1
+ /home/user/work/dir1 (if dir is empty)
+ /home/user/work/dir2 (if dir is empty)
+ /home/user/work (if dir is empty)
+ """
+
+ for root, dirs, files in os.walk(localpath, topdown=False):
+ # Delete files first
+ tmpdir = os.path.join(root).replace(localpath, "")
+ remotedir = os.path.join(remotepath, tmpdir.lstrip("/"))
+ self.delete_files(remotedir, files)
+
+ # Remove dirs if empty
+ for d in dirs:
+ tmpdir = os.path.join(root, d).replace(localpath, "")
+ remotedir = os.path.join(remotepath, tmpdir.lstrip("/"))
+                self.delete_dir(remotedir)
diff --git a/poky/meta/lib/oeqa/utils/subprocesstweak.py b/poky/meta/lib/oeqa/utils/subprocesstweak.py
new file mode 100644
index 000000000..1f7d11b55
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/subprocesstweak.py
@@ -0,0 +1,19 @@
+import subprocess
+
+class OETestCalledProcessError(subprocess.CalledProcessError):
+ def __str__(self):
+ def strify(o):
+ if isinstance(o, bytes):
+ return o.decode("utf-8", errors="replace")
+ else:
+ return o
+
+ s = "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
+ if hasattr(self, "output") and self.output:
+ s = s + "\nStandard Output: " + strify(self.output)
+ if hasattr(self, "stderr") and self.stderr:
+ s = s + "\nStandard Error: " + strify(self.stderr)
+ return s
+
+def errors_have_output():
+ subprocess.CalledProcessError = OETestCalledProcessError
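+
+# Typical use (a sketch): call errors_have_output() once at import time in
+# a test module, so that any CalledProcessError raised by subprocess later
+# includes the captured output in its message:
+#
+#   from oeqa.utils.subprocesstweak import errors_have_output
+#   errors_have_output()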
diff --git a/poky/meta/lib/oeqa/utils/targetbuild.py b/poky/meta/lib/oeqa/utils/targetbuild.py
new file mode 100644
index 000000000..1202d579f
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/targetbuild.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2013 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Provides a class for automating build tests for projects
+
+import os
+import re
+import bb.utils
+import subprocess
+import tempfile
+from abc import ABCMeta, abstractmethod
+
+class BuildProject(metaclass=ABCMeta):
+
+ def __init__(self, d, uri, foldername=None, tmpdir=None):
+ self.d = d
+ self.uri = uri
+ self.archive = os.path.basename(uri)
+ if not tmpdir:
+ tmpdir = self.d.getVar('WORKDIR')
+ if not tmpdir:
+ tmpdir = tempfile.mkdtemp(prefix='buildproject')
+ self.localarchive = os.path.join(tmpdir,self.archive)
+ if foldername:
+ self.fname = foldername
+ else:
+ self.fname = re.sub(r'\.tar\.bz2$|\.tar\.gz$|\.tar\.xz$', '', self.archive)
+
+ # Download self.archive to self.localarchive
+ def _download_archive(self):
+ dl_dir = self.d.getVar("DL_DIR")
+ if dl_dir and os.path.exists(os.path.join(dl_dir, self.archive)):
+ bb.utils.copyfile(os.path.join(dl_dir, self.archive), self.localarchive)
+ return
+
+ exportvars = ['HTTP_PROXY', 'http_proxy',
+ 'HTTPS_PROXY', 'https_proxy',
+ 'FTP_PROXY', 'ftp_proxy',
+ 'FTPS_PROXY', 'ftps_proxy',
+ 'NO_PROXY', 'no_proxy',
+ 'ALL_PROXY', 'all_proxy',
+ 'SOCKS5_USER', 'SOCKS5_PASSWD']
+
+ cmd = ''
+ for var in exportvars:
+ val = self.d.getVar(var)
+ if val:
+ cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
+
+ cmd = cmd + "wget -O %s %s" % (self.localarchive, self.uri)
+ subprocess.check_output(cmd, shell=True)
+
+ # This method should provide a way to run a command in the desired environment.
+ @abstractmethod
+ def _run(self, cmd):
+ pass
+
+ # The timeout parameter of target.run is set to 0 to make the ssh command
+ # run with no timeout.
+ def run_configure(self, configure_args='', extra_cmds=''):
+ return self._run('cd %s; %s ./configure %s' % (self.targetdir, extra_cmds, configure_args))
+
+ def run_make(self, make_args=''):
+ return self._run('cd %s; make %s' % (self.targetdir, make_args))
+
+ def run_install(self, install_args=''):
+ return self._run('cd %s; make install %s' % (self.targetdir, install_args))
+
+ def clean(self):
+ self._run('rm -rf %s' % self.targetdir)
+ subprocess.check_call('rm -f %s' % self.localarchive, shell=True)
+
+class TargetBuildProject(BuildProject):
+
+ def __init__(self, target, d, uri, foldername=None):
+ self.target = target
+ self.targetdir = "~/"
+ BuildProject.__init__(self, d, uri, foldername)
+
+ def download_archive(self):
+
+ self._download_archive()
+
+ (status, output) = self.target.copy_to(self.localarchive, self.targetdir)
+ if status != 0:
+ raise Exception("Failed to copy archive to target, output: %s" % output)
+
+ (status, output) = self.target.run('tar xf %s%s -C %s' % (self.targetdir, self.archive, self.targetdir))
+ if status != 0:
+ raise Exception("Failed to extract archive, output: %s" % output)
+
+        # Change targetdir to project folder
+ self.targetdir = self.targetdir + self.fname
+
+ # The timeout parameter of target.run is set to 0 to make the ssh command
+ # run with no timeout.
+ def _run(self, cmd):
+ return self.target.run(cmd, 0)[0]
+
+
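+# A typical flow (sketch; 'target' is an object offering run/copy_to, such
+# as the ssh target used by the runtime tests, and the URI is hypothetical):
+#
+#   project = TargetBuildProject(target, d, 'http://example.com/foo.tar.gz')
+#   project.download_archive()
+#   project.run_configure()
+#   project.run_make()
+#   project.clean()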
+class SDKBuildProject(BuildProject):
+
+ def __init__(self, testpath, sdkenv, d, uri, foldername=None):
+ self.sdkenv = sdkenv
+ self.testdir = testpath
+ self.targetdir = testpath
+ bb.utils.mkdirhier(testpath)
+ self.datetime = d.getVar('DATETIME')
+ self.testlogdir = d.getVar("TEST_LOG_DIR")
+ bb.utils.mkdirhier(self.testlogdir)
+ self.logfile = os.path.join(self.testlogdir, "sdk_target_log.%s" % self.datetime)
+ BuildProject.__init__(self, d, uri, foldername, tmpdir=testpath)
+
+ def download_archive(self):
+
+ self._download_archive()
+
+        cmd = 'tar xf %s -C %s' % (self.localarchive, self.targetdir)
+ subprocess.check_output(cmd, shell=True)
+
+        # Change targetdir to project folder
+ self.targetdir = os.path.join(self.targetdir, self.fname)
+
+ def run_configure(self, configure_args='', extra_cmds=' gnu-configize; '):
+ return super(SDKBuildProject, self).run_configure(configure_args=(configure_args or '$CONFIGURE_FLAGS'), extra_cmds=extra_cmds)
+
+ def run_install(self, install_args=''):
+ return super(SDKBuildProject, self).run_install(install_args=(install_args or "DESTDIR=%s/../install" % self.targetdir))
+
+ def log(self, msg):
+ if self.logfile:
+ with open(self.logfile, "a") as f:
+ f.write("%s\n" % msg)
+
+ def _run(self, cmd):
+ self.log("Running . %s; " % self.sdkenv + cmd)
+ return subprocess.check_call(". %s; " % self.sdkenv + cmd, shell=True)
diff --git a/poky/meta/lib/oeqa/utils/testexport.py b/poky/meta/lib/oeqa/utils/testexport.py
new file mode 100644
index 000000000..be2a2110f
--- /dev/null
+++ b/poky/meta/lib/oeqa/utils/testexport.py
@@ -0,0 +1,263 @@
+# Copyright (C) 2015 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# Provides functions to help with exporting binaries obtained from built targets
+
+import os, re, sys, glob as g, shutil as sh
+from time import sleep
+from .commands import runCmd
+from difflib import SequenceMatcher as SM
+
+try:
+ import bb
+except ImportError:
+ class my_log():
+ def __init__(self):
+ pass
+ def plain(self, msg):
+ if msg:
+ print(msg)
+ def warn(self, msg):
+ if msg:
+ print("WARNING: " + msg)
+ def fatal(self, msg):
+ if msg:
+ print("FATAL:" + msg)
+ sys.exit(1)
+ bb = my_log()
+
+
+def determine_if_poky_env():
+ """
+    Used to determine whether we are inside the poky environment. Useful for remote machines where poky is not present.
+    """
+    # Both directories must be on PATH for this to count as a poky environment
+    path = os.getenv("PATH") or ""
+    check_env = "/scripts" in path and "/bitbake/bin" in path
+ return check_env
+
+
+def get_dest_folder(tune_features, folder_list):
+ """
+ Function to determine what rpm deploy dir to choose for a given architecture based on TUNE_FEATURES
+ """
+ features_list = tune_features.split(" ")
+ features_list.reverse()
+ features_list = "_".join(features_list)
+ match_rate = 0
+ best_match = None
+ for folder in folder_list:
+ curr_match_rate = SM(None, folder, features_list).ratio()
+ if curr_match_rate > match_rate:
+ match_rate = curr_match_rate
+ best_match = folder
+ return best_match
+
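+# Worked sketch (folder names hypothetical): with tune_features
+# "x86 corei7" the features are reversed and joined into "corei7_x86"; given
+# folder_list ["corei7_x86", "noarch"], the first folder matches exactly
+# (ratio 1.0) and is returned.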
+
+def process_binaries(d, params):
+ param_list = params
+ export_env = d.getVar("TEST_EXPORT_ONLY")
+
+ def extract_binary(pth_to_pkg, dest_pth=None):
+ cpio_command = runCmd("which cpio")
+ rpm2cpio_command = runCmd("ls /usr/bin/rpm2cpio")
+        if (cpio_command.status != 0) or (rpm2cpio_command.status != 0):
+            bb.fatal("Either the \"rpm2cpio\" or the \"cpio\" tool is not available on your system. "
+                "No binary extraction process will be available, crashing all related tests. "
+                "Please install them according to your OS recommendations") # will exit here
+ if dest_pth:
+ os.chdir(dest_pth)
+ else:
+ os.chdir("%s" % os.sep)# this is for native package
+ extract_bin_command = runCmd("%s %s | %s -idm" % (rpm2cpio_command.output, pth_to_pkg, cpio_command.output)) # semi-hardcoded because of a bug on poky's rpm2cpio
+ return extract_bin_command
+
+ if determine_if_poky_env(): # machine with poky environment
+ exportpath = d.getVar("TEST_EXPORT_DIR") if export_env else d.getVar("DEPLOY_DIR")
+ rpm_deploy_dir = d.getVar("DEPLOY_DIR_RPM")
+ arch = get_dest_folder(d.getVar("TUNE_FEATURES"), os.listdir(rpm_deploy_dir))
+ arch_rpm_dir = os.path.join(rpm_deploy_dir, arch)
+ extracted_bin_dir = os.path.join(exportpath,"binaries", arch, "extracted_binaries")
+ packaged_bin_dir = os.path.join(exportpath,"binaries", arch, "packaged_binaries")
+ # creating necessary directory structure in case testing is done in poky env.
+ if export_env == "0":
+ if not os.path.exists(extracted_bin_dir): bb.utils.mkdirhier(extracted_bin_dir)
+ if not os.path.exists(packaged_bin_dir): bb.utils.mkdirhier(packaged_bin_dir)
+
+ if param_list[3] == "native":
+ if export_env == "1": #this is a native package and we only need to copy it. no need for extraction
+ native_rpm_dir = os.path.join(rpm_deploy_dir, get_dest_folder("{} nativesdk".format(d.getVar("BUILD_SYS")), os.listdir(rpm_deploy_dir)))
+ native_rpm_file_list = [item for item in os.listdir(native_rpm_dir) if re.search("nativesdk-" + param_list[0] + "-([0-9]+\.*)", item)]
+ if not native_rpm_file_list:
+ bb.warn("Couldn't find any version of {} native package. Related tests will most probably fail.".format(param_list[0]))
+ return ""
+                for item in native_rpm_file_list:  # will copy all versions of the package; the used version will be selected on the remote machine
+ bb.plain("Copying native package file: %s" % item)
+ sh.copy(os.path.join(rpm_deploy_dir, native_rpm_dir, item), os.path.join(d.getVar("TEST_EXPORT_DIR"), "binaries", "native"))
+            else: # nothing to do here; running tests under bitbake, so we assume native binaries are in sysroots dir.
+ if param_list[1] or param_list[4]:
+ bb.warn("Native binary %s %s%s. Running tests under bitbake environment. Version can't be checked except when the test itself does it"
+ " and binary can't be removed."%(param_list[0],"has assigned ver. " + param_list[1] if param_list[1] else "",
+ ", is marked for removal" if param_list[4] else ""))
+ else:# the package is target aka DUT intended and it is either required to be delivered in an extracted form or in a packaged version
+ target_rpm_file_list = [item for item in os.listdir(arch_rpm_dir) if re.search(param_list[0] + "-([0-9]+\.*)", item)]
+ if not target_rpm_file_list:
+ bb.warn("Couldn't find any version of target package %s. Please ensure it was built. "
+ "Related tests will probably fail." % param_list[0])
+ return ""
+ if param_list[2] == "rpm": # binary should be deployed as rpm; (other, .deb, .ipk? ; in the near future)
+ for item in target_rpm_file_list: # copying all related rpm packages. "Intuition" reasons, someone may need other versions too. Deciding later on version
+ bb.plain("Copying target specific packaged file: %s" % item)
+ sh.copy(os.path.join(arch_rpm_dir, item), packaged_bin_dir)
+ return "copied"
+ else: # it is required to extract the binary
+ if param_list[1]: # the package is versioned
+ for item in target_rpm_file_list:
+ if re.match(".*-{}-.*\.rpm".format(param_list[1]), item):
+ destination = os.path.join(extracted_bin_dir,param_list[0], param_list[1])
+ bb.utils.mkdirhier(destination)
+ extract_binary(os.path.join(arch_rpm_dir, item), destination)
+ break
+ else:
+ bb.warn("Couldn't find the desired version %s for target binary %s. Related test cases will probably fail." % (param_list[1], param_list[0]))
+ return ""
+ return "extracted"
+ else: # no version provided, just extract one binary
+ destination = os.path.join(extracted_bin_dir,param_list[0],
+ re.search(".*-([0-9]+\.[0-9]+)-.*rpm", target_rpm_file_list[0]).group(1))
+ bb.utils.mkdirhier(destination)
+ extract_binary(os.path.join(arch_rpm_dir, target_rpm_file_list[0]), destination)
+ return "extracted"
+ else: # remote machine
+ binaries_path = os.getenv("bin_dir")# in order to know where the binaries are, bin_dir is set as env. variable
+ if param_list[3] == "native": #need to extract the native pkg here
+ native_rpm_dir = os.path.join(binaries_path, "native")
+ native_rpm_file_list = os.listdir(native_rpm_dir)
+ for item in native_rpm_file_list:
+ if param_list[1] and re.match("nativesdk-{}-{}-.*\.rpm".format(param_list[0], param_list[1]), item): # native package has version
+ extract_binary(os.path.join(native_rpm_dir, item))
+ break
+ else:# just copy any related native binary
+ found_version = re.match("nativesdk-{}-([0-9]+\.[0-9]+)-".format(param_list[0]), item).group(1)
+ if found_version:
+ extract_binary(os.path.join(native_rpm_dir, item))
+ else:
+ bb.warn("Couldn't find native package %s%s. Related test cases will be influenced." %
+ (param_list[0], " with version " + param_list[1] if param_list[1] else ""))
+ return
+
+ else: # this is for target device
+ if param_list[2] == "rpm":
+ return "No need to extract, this is an .rpm file"
+ arch = get_dest_folder(d.getVar("TUNE_FEATURES"), os.listdir(binaries_path))
+ extracted_bin_path = os.path.join(binaries_path, arch, "extracted_binaries")
+ extracted_bin_list = [item for item in os.listdir(extracted_bin_path)]
+ packaged_bin_path = os.path.join(binaries_path, arch, "packaged_binaries")
+ packaged_bin_file_list = os.listdir(packaged_bin_path)
+ # see if the package is already in the extracted ones; maybe it was deployed when exported the env.
+ if os.path.exists(os.path.join(extracted_bin_path, param_list[0], param_list[1] if param_list[1] else "")):
+ return "binary %s is already extracted" % param_list[0]
+ else: # we need to search for it in the packaged binaries directory. It may have been shipped after export
+ for item in packaged_bin_file_list:
+ if param_list[1]:
+ if re.match("%s-%s.*rpm" % (param_list[0], param_list[1]), item): # package with version
+ if not os.path.exists(os.path.join(extracted_bin_path, param_list[0],param_list[1])):
+ os.makedirs(os.path.join(extracted_bin_path, param_list[0], param_list[1]))
+ extract_binary(os.path.join(packaged_bin_path, item), os.path.join(extracted_bin_path, param_list[0],param_list[1]))
+ bb.plain("Using {} for {}".format(os.path.join(packaged_bin_path, item), param_list[0]))
+ break
+ else:
+ if re.match("%s-.*rpm" % param_list[0], item):
+ found_version = re.match(".*-([0-9]+\.[0-9]+)-", item).group(1)
+ if not os.path.exists(os.path.join(extracted_bin_path, param_list[0], found_version)):
+ os.makedirs(os.path.join(extracted_bin_path, param_list[0], found_version))
+ bb.plain("Used ver. %s for %s" % (found_version, param_list[0]))
+ extract_binary(os.path.join(packaged_bin_path, item), os.path.join(extracted_bin_path, param_list[0], found_version))
+ break
+ else:
+ bb.warn("Couldn't find target package %s%s. Please ensure it is available "
+ "in either of these directories: extracted_binaries or packaged_binaries. "
+ "Related tests will probably fail." % (param_list[0], " with version " + param_list[1] if param_list[1] else ""))
+ return
+ return "Binary %s extracted successfully." % param_list[0]
+
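+# For reference, 'params' is positional (inferred from the accesses above):
+# [0] package name, [1] optional version, [2] 'rpm' to deploy the packaged
+# form, [3] 'native' for host-side packages, [4] truthy (e.g. 'rm') to mark
+# the binary for removal after the test.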
+
+def files_to_copy(base_dir):
+ """
+    Produces a list of the files found under the base dir path sent as param
+    :return: the list of full file paths
+ """
+ files_list = []
+ dir_list = [base_dir]
+ count = 1
+ dir_count = 1
+ while (dir_count == 1 or dir_count != count):
+ count = dir_count
+ for dir in dir_list:
+ for item in os.listdir(dir):
+ if os.path.isdir(os.path.join(dir, item)) and os.path.join(dir, item) not in dir_list:
+ dir_list.append(os.path.join(dir, item))
+ dir_count = len(dir_list)
+ elif os.path.join(dir, item) not in files_list and os.path.isfile(os.path.join(dir, item)):
+ files_list.append(os.path.join(dir, item))
+ return files_list
+
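+# Worked sketch (hypothetical tree): for base_dir '/tmp/pkg' containing
+# 'usr/bin/foo', the returned list is ['/tmp/pkg/usr/bin/foo'] -- full
+# paths, which callers then split on base_dir to derive the on-target path.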
+
+def send_bin_to_DUT(d,params):
+ from oeqa.oetest import oeRuntimeTest
+ param_list = params
+ cleanup_list = list()
+ bins_dir = os.path.join(d.getVar("TEST_EXPORT_DIR"), "binaries") if determine_if_poky_env() \
+ else os.getenv("bin_dir")
+ arch = get_dest_folder(d.getVar("TUNE_FEATURES"), os.listdir(bins_dir))
+ arch_rpms_dir = os.path.join(bins_dir, arch, "packaged_binaries")
+ extracted_bin_dir = os.path.join(bins_dir, arch, "extracted_binaries", param_list[0])
+
+ def send_extracted_binary():
+ bin_local_dir = os.path.join(extracted_bin_dir, param_list[1] if param_list[1] else os.listdir(extracted_bin_dir)[0])
+ for item in files_to_copy(bin_local_dir):
+ split_path = item.split(bin_local_dir)[1]
+            path_on_DUT = split_path if split_path[0] == "/" else "/" + split_path  # create the path as on the DUT; e.g. /usr/bin/bin_file
+ (status, output) = oeRuntimeTest.tc.target.copy_to(item, path_on_DUT)
+ if status != 0:
+ bb.warn("Failed to copy %s binary file %s on the remote target: %s" %
+ (param_list[0], "ver. " + param_list[1] if param_list[1] else "", d.getVar("MACHINE")))
+ return
+ if param_list[4] == "rm":
+ cleanup_list.append(path_on_DUT)
+ return cleanup_list
+
+ def send_rpm(remote_path): # if it is not required to have an extracted binary, but to send an .rpm file
+ rpm_to_send = ""
+ for item in os.listdir(arch_rpms_dir):
+ if param_list[1] and re.match("%s-%s-.*rpm"%(param_list[0], param_list[1]), item):
+ rpm_to_send = item
+ break
+ elif re.match("%s-[0-9]+\.[0-9]+-.*rpm" % param_list[0], item):
+ rpm_to_send = item
+ break
+ else:
+ bb.warn("No rpm package found for %s %s in .rpm files dir %s. Skipping deployment." %
+ (param_list[0], "ver. " + param_list[1] if param_list[1] else "", rpms_file_dir) )
+ return
+ (status, output) = oeRuntimeTest.tc.target.copy_to(os.path.join(arch_rpms_dir, rpm_to_send), remote_path)
+ if status != 0:
+ bb.warn("Failed to copy %s on the remote target: %s" %(param_list[0], d.getVar("MACHINE")))
+ return
+ if param_list[4] == "rm":
+ cleanup_list.append(os.path.join(remote_path, rpm_to_send))
+ return cleanup_list
+
+ if param_list[2] == "rpm": # send an .rpm file
+ return send_rpm("/home/root") # rpms will be sent on home dir of remote machine
+ else:
+ return send_extracted_binary()
+
+
+def rm_bin(removal_list): # need to know both if the binary is sent archived and the path where it is sent if archived
+ from oeqa.oetest import oeRuntimeTest
+ for item in removal_list:
+ (status,output) = oeRuntimeTest.tc.target.run("rm " + item)
+ if status != 0:
+ bb.warn("Failed to remove: %s. Please ensure connection with the target device is up and running and "
+ "you have the needed rights." % item)
+
diff --git a/poky/meta/lib/rootfspostcommands.py b/poky/meta/lib/rootfspostcommands.py
new file mode 100644
index 000000000..4742e0613
--- /dev/null
+++ b/poky/meta/lib/rootfspostcommands.py
@@ -0,0 +1,56 @@
+import os
+
+def sort_file(filename, mapping):
+ """
+ Sorts a passwd or group file based on the numeric ID in the third column.
+ If a mapping is given, the name from the first column is mapped via that
+ dictionary instead (necessary for /etc/shadow and /etc/gshadow). If not,
+ a new mapping is created on the fly and returned.
+ """
+ new_mapping = {}
+ with open(filename, 'rb+') as f:
+ lines = f.readlines()
+ # No explicit error checking for the sake of simplicity. /etc
+ # files are assumed to be well-formed, causing exceptions if
+ # not.
+ for line in lines:
+ entries = line.split(b':')
+ name = entries[0]
+ if mapping is None:
+ id = int(entries[2])
+ else:
+ id = mapping[name]
+ new_mapping[name] = id
+ # Sort by numeric id first, with entire line as secondary key
+ # (just in case that there is more than one entry for the same id).
+ lines.sort(key=lambda line: (new_mapping[line.split(b':')[0]], line))
+ # We overwrite the entire file, i.e. no truncate() necessary.
+ f.seek(0)
+ f.write(b''.join(lines))
+ return new_mapping
+
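+# Worked sketch: for a passwd file containing the lines b'b:x:20:...' and
+# b'a:x:10:...', sort_file() rewrites the file with the id-10 line first and
+# returns {b'a': 10, b'b': 20}; that mapping is then passed back in when
+# sorting the matching shadow file, which lacks the numeric id column.
+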
+def remove_backup(filename):
+ """
+ Removes the backup file for files like /etc/passwd.
+ """
+ backup_filename = filename + '-'
+ if os.path.exists(backup_filename):
+ os.unlink(backup_filename)
+
+def sort_passwd(sysconfdir):
+ """
+ Sorts passwd and group files in a rootfs /etc directory by ID.
+    Backup files are sometimes inconsistent and then cannot be
+ sorted (YOCTO #11043), and more importantly, are not needed in
+ the initial rootfs, so they get deleted.
+ """
+ for main, shadow in (('passwd', 'shadow'),
+ ('group', 'gshadow')):
+ filename = os.path.join(sysconfdir, main)
+ remove_backup(filename)
+ if os.path.exists(filename):
+ mapping = sort_file(filename, None)
+ filename = os.path.join(sysconfdir, shadow)
+ remove_backup(filename)
+ if os.path.exists(filename):
+ sort_file(filename, mapping)