author     Patrick Williams <patrick@stwcx.xyz>  2017-02-23 20:56:04 -0600
committer  Patrick Williams <patrick@stwcx.xyz>  2017-03-29 21:31:41 +0000
commit     e69d235bfcfc3a8fef01d9ec274bbf878e9a00c5 (patch)
tree       72fe198415672cf5192d8f6c52db4323e1fc9bb1 /import-layers
parent     b2e6a9b5bcc725ab21f4a319e32ea09700a732b6 (diff)
download   talos-openbmc-e69d235bfcfc3a8fef01d9ec274bbf878e9a00c5.tar.gz
           talos-openbmc-e69d235bfcfc3a8fef01d9ec274bbf878e9a00c5.zip
Squashed 'import-layers/meta-virtualization/'.
5cacf86 containerd: Fix build on 386
120745a xen: add a README to provide info for users
1249508 xen: remove unused patch
b99f54e xen: update license checksum
36db755 xen: ensure we build release builds
358c14b xen: make it easy to override hypervisor config
beadd3a xen: add gnu-efi to DEPENDS
bec00eb docker: cosmetic - fix indentation
2307ad4 docker: use a switch to set GOARCH based on TARGET_ARCH value
629b6bb docker: update description
23eb02c docker: update to docker 1.12.5 release
20c1905 xen-image-minimal: if DISTRO_FEATURES is missing xen error
7946a48 README: make it easier to copy and paste send command
923bf51 xen: bump to version 4.8.0
1f3d204 xen: switch to updated download URL
f364321 xen: add extra generated file to package
ada27ac kernel: add linux-yocto-4.8 bbappend
2d00a79 protobuf: ptest: Fix QA file-rdep warning
b2f3852 protobuf: Fix QA error for GNU_HASH
d55a579 go-cross: Fix failure if building go-cross first
c3d8676 go-native: Add work around for binutils >= 2.27
d866c43 containerd: use the target toolchain to build cgo components
58bc830 go-cross: Fix host contamination for x86_64 host to x86_64 target
5caa9ba lxc: add glibc-utils to rdepend
05d080d Revert "go-cross: Fix host contamination for x86_64 host to x86_64 target"
b25af25 libvirt: remove .o files for ptest
7ff08e5 go-cross: Fix host contamination for x86_64 host to x86_64 target
1bf8c77 multipath-tools: Drop recipe
b9b7ece criu: uprev to 2.5
c415f97 protobuf-c: uprev to 1.2.1 from 0.15
5e5e09a protobuf: uprev 3.0.0 from 2.6.1
222f8d4 kvmtool: add lightweight hypervisor native Linux KVM tool
a3766e4 go-distribution-digest: Fix do_package failure
548c88a libvirt: Add missing test helpers and remove windows 1252
c46b790 protobuf: fix source location
84a1671 custom-licenses: add NewRelic license
bc4459e cgroup-lite: update to version 1.11
db5ef80 docker: update license md5sum
951c83c docker: cgroup-lite is not required with systemd
3907b09 libvirt: update list of licenses
aeb1763 openvswitch: add missing python modules to execute ovs-test program
42af8a1 libvirt: fix CVE-2016-5008
bdcf476 libvirt: fix build error for arm
af2948e libvirt: upgrade 1.3.2 -> 1.3.5
357ca45 libvirt: fix python install in multilib case.
f7fd448 xen: uprev to 4.7.0
9179537 libvirt: add missing configuration file
1fd5a59 containers: uprev docker (1.12.0), runc (1.0.0-rc) and containerd (0.2.2)
7d41ad1 docker: fix GNU hash QA warning
59c338a iasl: Upgrade to 20160527 from 20120215
b657b65 lxc: fixes lxc segment fault issue on arm if it is compiled with GCC 5.2
cb16321 base-files: remove bbappend hacking on fstab
f53bca5 netns: New recipe
0ce0b5c runc: Use go-osarchmap to set GOARCH
6b656b8 runc: Backport fix for building against musl
7605443 docker-registry: Make version dependence compatible with opkg
06dccfa criu: fix build-deps qa warning
01aa8f1 docker: override GOROOT at build time
313b06b go-cross: allow tmp directory to already exist
da1e90e protobuf: Add RDEPENDS for ptest
dc7ab5b libvirt-python: backport a patch to fix crash in getAllDomainStats
16a31ef containerd: replace deprecated base_contains
0afa6e1 protobuf: add protobuf-2.5.0 into devtool
823c8cf criu: remove protobuf-2.6.1 and the related
3c3293e go-native: don't use host /var/tmp for temp build artifacts
77e846b docker: drop obselete dependencies
6f1ea8b lxc: workaround to ignore deprecated
fd94b3f openvswitch: Fix rootfs creation errors
7e2ad37 go-cross: don't use host /var/tmp for temporary build artifacts
a3617f2 globally replace 'base_contains' calls with 'bb.utils.contains'
1fd94d8 criu: Fix QA warning
6f17a76 docker: uprev to 1.11.1
fd1a6d1 runc: initial recipe
e919b64 containerd: initial recipe
79654fc go: add go-cross 1.6
5dedd39 lxc: update configuration to include all required options
REVERT: c4a1711 docker-registry: Make version dependence compatible with opkg
REVERT: b6b8885 docker: drop obselete dependencies
REVERT: 44440d7 go-cross: allow tmp directory to already exist
REVERT: 7f3cc50 go-native: don't use host /var/tmp for temp build artifacts
REVERT: 25ee1f1 go-cross: don't use host /var/tmp for temporary build artifacts
REVERT: a9a9fc0 go: add go-cross 1.6

Change-Id: Ic4431940d01a4c0ec113786583c6e09cec88fb03
git-subtree-dir: import-layers/meta-virtualization
git-subtree-split: 5cacf8632da2c20dc994c3b33608f0d3cea9df13
Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
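For reference, a squashed subtree import with this shape of commit message (the "Squashed '<prefix>/'" subject plus the git-subtree-dir/git-subtree-split trailers) is what git subtree produces when pulling with --squash. A minimal sketch of how such a commit is generated; the upstream URL and branch here are assumptions, not taken from this commit:

    # Hypothetical reproduction; remote URL and branch are assumptions.
    git subtree pull --prefix=import-layers/meta-virtualization \
        git://git.yoctoproject.org/meta-virtualization master --squash

The git-subtree-split trailer records the upstream commit (here 5cacf86...) that the squash was cut at, which is why it matches the first entry in the shortlog above.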
Diffstat (limited to 'import-layers')
-rw-r--r--  import-layers/meta-virtualization/README  2
-rw-r--r--  import-layers/meta-virtualization/files/custom-licenses/NewRelic  33
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.11.bb (renamed from import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb)  6
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/containerd/containerd_git.bb  91
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb  34
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch  63
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch  108
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch  46
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch  29
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch  25
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch  26
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb  6
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb  95
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch  1828
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch  56
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/docker/files/docker.service  2
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb  12
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch  48
-rw-r--r--  import-layers/meta-virtualization/recipes-containers/runc/runc_git.bb  66
-rw-r--r--  import-layers/meta-virtualization/recipes-core/base-files/base-files_3.%.bbappend  5
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc  12
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc  2
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb  8
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/files/disable_tests.patch (renamed from import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch)  6
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/files/protobuf-allow-running-python-scripts-from-anywhere.patch (renamed from import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch)  4
-rwxr-xr-x  import-layers/meta-virtualization/recipes-devtools/protobuf/files/run-ptest (renamed from import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest)  0
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-c_1.2.1.bb (renamed from import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb)  10
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-native_3.0.0.bb (renamed from import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb)  8
-rw-r--r--  import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf_3.0.0.bb (renamed from import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb)  17
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/iasl/iasl/Make-CC-definition-conditional.patch  29
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/iasl/iasl/iasl.1  135
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/iasl/iasl_20120215.bb  29
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/iasl/iasl_20160527.bb  36
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb  4
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb  8
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/kvmtool/files/external-crosscompiler.patch  31
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb  23
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.5/0001-qemu-Let-empty-default-VNC-password-work-as-document.patch  81
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.5/0001-to-fix-build-error.patch (renamed from import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.2/0001-to-fix-build-error.patch)  0
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc  8
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-nsslinktest-also-build-virAtomic.h.patch  40
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-Remove-Windows-1252-check-from-esxutilstest.patch  28
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-add-missing-test_helper-files.patch  29
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch  16
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/install-missing-file.patch  52
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch  34
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb (renamed from import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.2.bb)  29
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/multipath-tools/multipath-tools_git.bb  46
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/README  24
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen.inc  35
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_4.6.1.bb  10
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb  10
-rw-r--r--  import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb  9
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg  25
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend  4
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend  4
-rw-r--r--  import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.8.bbappend  19
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb  44
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc  7
-rw-r--r--  import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb  1
60 files changed, 1233 insertions, 2265 deletions
diff --git a/import-layers/meta-virtualization/README b/import-layers/meta-virtualization/README
index c60044d2f..2578f9049 100644
--- a/import-layers/meta-virtualization/README
+++ b/import-layers/meta-virtualization/README
@@ -42,7 +42,7 @@ Maintainers: Raymond Danks <ray.danks@se-eng.com>
Bruce Ashfield <bruce.ashfield@gmail.com>
When sending single patches, please using something like:
-'git send-email -1 --to meta-virtualization@yoctoproject.org --subject-prefix=meta-virtualization][PATCH'
+$ git send-email -1 --to meta-virtualization@yoctoproject.org --subject-prefix='meta-virtualization][PATCH'
License
-------
diff --git a/import-layers/meta-virtualization/files/custom-licenses/NewRelic b/import-layers/meta-virtualization/files/custom-licenses/NewRelic
new file mode 100644
index 000000000..9c2e5e863
--- /dev/null
+++ b/import-layers/meta-virtualization/files/custom-licenses/NewRelic
@@ -0,0 +1,33 @@
+ Copyright (c) 2010-2014 New Relic, Inc. All rights reserved.
+
+Certain inventions disclosed in this file may be claimed within patents
+owned or patent applications filed by New Relic, Inc. or third parties.
+Subject to the terms of this notice, New Relic grants you a nonexclusive,
+nontransferable license, without the right to sublicense, to (a) install
+and execute one copy of these files on any number of workstations owned or
+controlled by you and (b) distribute verbatim copies of these files to
+third parties. As a condition to the foregoing grant, you must provide this
+notice along with each copy you distribute and you must not remove, alter,
+or obscure this notice.
+
+All other use, reproduction, modification, distribution, or other
+exploitation of these files is strictly prohibited, except as may be set
+forth in a separate written license agreement between you and New Relic.
+The terms of any such license agreement will control over this notice. The
+license stated above will be automatically terminated and revoked if you
+exceed its scope or violate any of the terms of this notice.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of New Relic, except as required for
+reasonable and customary use in describing the origin of this file and
+reproducing the content of this notice. You may not mark or brand this file
+with any trade name, trademarks, service marks, or product names other than
+the original brand (if any) provided by New Relic.
+
+Unless otherwise expressly agreed by New Relic in a separate written
+license agreement, these files are provided AS IS, WITHOUT WARRANTY OF ANY
+KIND, including without any implied warranties of MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE, TITLE, or NON-INFRINGEMENT. As a condition to
+your use of these files, you are solely responsible for such use. New Relic
+will have no liability to you for direct, indirect, consequential,
+incidental, special, or punitive damages or for lost profits or data.
diff --git a/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.11.bb
index 3ca5238cc..b3af6f4c9 100644
--- a/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb
+++ b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.11.bb
@@ -4,10 +4,10 @@ DESCRIPTION = "Light-weight package to set up cgroups at system boot."
HOMEPAGE = "http://packages.ubuntu.com/source/precise/cgroup-lite"
LICENSE = "GPLv2"
LIC_FILES_CHKSUM = "file://debian/copyright;md5=5d5da4e0867cf06014f87102154d0102"
-SRC_URI = "http://archive.ubuntu.com/ubuntu/pool/main/c/cgroup-lite/cgroup-lite_1.1.tar.gz"
+SRC_URI = "https://launchpad.net/ubuntu/+archive/primary/+files/cgroup-lite_1.11.tar.xz"
SRC_URI += "file://cgroups-init"
-SRC_URI[md5sum] = "041a0d8ad2b192271a2e5507fdb6809f"
-SRC_URI[sha256sum] = "e7f9992b90b5b4634f3b8fb42580ff28ff31093edb297ab872c37f61a94586bc"
+SRC_URI[md5sum] = "b20976194ee8fdb61e6b55281fb6ead4"
+SRC_URI[sha256sum] = "a79ab9ae6fb3ff3ce0aa5539b055c0379eaffdc6c5f003af4010fcea683c1a45"
inherit update-rc.d
diff --git a/import-layers/meta-virtualization/recipes-containers/containerd/containerd_git.bb b/import-layers/meta-virtualization/recipes-containers/containerd/containerd_git.bb
new file mode 100644
index 000000000..c2000b1c5
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/containerd/containerd_git.bb
@@ -0,0 +1,91 @@
+HOMEPAGE = "https://github.com/docker/containerd"
+SUMMARY = "containerd is a daemon to control runC"
+DESCRIPTION = "containerd is a daemon to control runC, built for performance and density. \
+ containerd leverages runC's advanced features such as seccomp and user namespace \
+ support as well as checkpoint and restore for cloning and live migration of containers."
+
+SRCREV = "0ac3cd1be170d180b2baed755e8f0da547ceb267"
+SRC_URI = "\
+ git://github.com/docker/containerd.git;nobranch=1 \
+ "
+
+# Apache-2.0 for containerd
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE.code;md5=aadc30f9c14d876ded7bedc0afd2d3d7"
+
+S = "${WORKDIR}/git"
+
+CONTAINERD_VERSION = "0.2.2"
+PV = "${CONTAINERD_VERSION}+git${SRCREV}"
+
+DEPENDS = "go-cross \
+ "
+
+RRECOMMENDS_${PN} = "lxc docker"
+CONTAINERD_PKG="github.com/docker/containerd"
+
+do_configure[noexec] = "1"
+
+do_compile() {
+ export GOARCH="${TARGET_ARCH}"
+ # supported amd64, 386, arm arm64
+ if [ "${TARGET_ARCH}" = "x86_64" ]; then
+ export GOARCH="amd64"
+ fi
+ if [ "${TARGET_ARCH}" = "aarch64" ]; then
+ export GOARCH="arm64"
+ fi
+ if [ "${TARGET_ARCH}" = "i586" ]; then
+ export GOARCH="386"
+ fi
+
+ # Set GOPATH. See 'PACKAGERS.md'. Don't rely on
+ # docker to download its dependencies but rather
+ # use dependencies packaged independently.
+ cd ${S}
+ rm -rf .gopath
+ mkdir -p .gopath/src/"$(dirname "${CONTAINERD_PKG}")"
+ ln -sf ../../../.. .gopath/src/"${CONTAINERD_PKG}"
+ export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ cd -
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed headers files and libraries
+ export CGO_ENABLED="1"
+ export CFLAGS=""
+ export LDFLAGS=""
+ export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CC_FOR_TARGET="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"
+ export CXX_FOR_TARGET="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"
+
+ oe_runmake static
+}
+
+# Note: disabled for now, since docker is launching containerd
+# inherit systemd
+# SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+# SYSTEMD_SERVICE_${PN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','containerd.service','',d)}"
+
+do_install() {
+ mkdir -p ${D}/${bindir}
+
+ cp ${S}/bin/containerd ${D}/${bindir}/containerd
+ cp ${S}/bin/containerd-shim ${D}/${bindir}/containerd-shim
+ cp ${S}/bin/ctr ${D}/${bindir}/containerd-ctr
+
+ ln -sf containerd ${D}/${bindir}/docker-containerd
+ ln -sf containerd-shim ${D}/${bindir}/docker-containerd-shim
+ ln -sf containerd-ctr ${D}/${bindir}/docker-containerd-ctr
+
+ if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+ install -d ${D}${systemd_unitdir}/system
+ install -m 644 ${S}/hack/containerd.service ${D}/${systemd_unitdir}/system
+ # adjust from /usr/local/bin to /usr/bin/
+ sed -e "s:/usr/local/bin/containerd:${bindir}/docker-containerd -l \"unix\:///var/run/docker/libcontainerd/docker-containerd.sock\":g" -i ${D}/${systemd_unitdir}/system/containerd.service
+ fi
+}
+
+FILES_${PN} += "/lib/systemd/system/*"
+
+INHIBIT_PACKAGE_STRIP = "1"
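The do_compile task in the recipe above points GOPATH at a symlink farm inside the source tree, so Go resolves the containerd import path back to the checked-out sources and their vendored dependencies instead of fetching anything. A minimal standalone sketch of that trick, assuming a hypothetical checkout in $SRC:

    # Sketch only; PKG and SRC are assumptions standing in for the
    # recipe's ${CONTAINERD_PKG} and ${S}.
    PKG=github.com/docker/containerd
    cd "$SRC"
    rm -rf .gopath
    mkdir -p ".gopath/src/$(dirname "$PKG")"
    ln -sf ../../../.. ".gopath/src/$PKG"      # .gopath/src/$PKG -> source tree
    export GOPATH="$SRC/.gopath:$SRC/vendor"   # vendored deps resolved second

The symlink climbs four levels (out of docker/, github.com/, src/, .gopath/) so that .gopath/src/$PKG is the source tree itself, making the repository importable under its canonical package path.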
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
index 48bcdc271..dc0bdbe63 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
@@ -11,21 +11,22 @@ LICENSE = "GPLv2"
EXCLUDE_FROM_WORLD = "1"
-LIC_FILES_CHKSUM = "file://COPYING;md5=5cc804625b8b491b6b4312f0c9cb5efa"
+LIC_FILES_CHKSUM = "file://COPYING;md5=412de458544c1cb6a2b512cd399286e2"
-SRCREV = "4c5b23e52c1dc4e3fbbc7472b92e7b1ce9d22f02"
+SRCREV = "c031417255f6a5c4409d15ff0b36af5f6e90c559"
PR = "r0"
-PV = "1.6+git${SRCPV}"
+PV = "2.5+git${SRCPV}"
SRC_URI = "git://github.com/xemul/criu.git;protocol=git \
- file://0001-criu-Fix-toolchain-hardcode.patch \
- file://0002-criu-Skip-documentation-install.patch \
- file://0001-criu-Change-libraries-install-directory.patch \
- "
+ file://0001-criu-Fix-toolchain-hardcode.patch \
+ file://0002-criu-Skip-documentation-install.patch \
+ file://0001-criu-Change-libraries-install-directory.patch \
+ ${@bb.utils.contains('PACKAGECONFIG', 'selinux', '', 'file://disable-selinux.patch', d)} \
+ "
COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
-DEPENDS += "protobuf-c-native protobuf-c"
+DEPENDS += "libnl libcap protobuf-c-native protobuf-c"
S = "${WORKDIR}/git"
@@ -34,14 +35,16 @@ S = "${WORKDIR}/git"
# if the ARCH is ARMv7 or ARMv6.
# ARM BSPs need set CRIU_BUILD_ARCH variable for building CRIU.
#
-EXTRA_OEMAKE_arm += "ARCH=${CRIU_BUILD_ARCH} WERROR=0"
-EXTRA_OEMAKE_x86-64 += "ARCH=${TARGET_ARCH} WERROR=0"
-EXTRA_OEMAKE_aarch64 += "ARCH=${TARGET_ARCH} WERROR=0"
+EXTRA_OEMAKE_arm += "ARCH=arm UNAME-M=${CRIU_BUILD_ARCH} WERROR=0"
+EXTRA_OEMAKE_x86-64 += "ARCH=x86 WERROR=0"
+EXTRA_OEMAKE_aarch64 += "ARCH=arm64 WERROR=0"
EXTRA_OEMAKE_append += "SBINDIR=${sbindir} LIBDIR=${libdir} INCLUDEDIR=${includedir} PIEGEN=no"
EXTRA_OEMAKE_append += "LOGROTATEDIR=${sysconfdir} SYSTEMDUNITDIR=${systemd_unitdir}"
-CFLAGS += "-D__USE_GNU -D_GNU_SOURCE"
+CFLAGS += "-D__USE_GNU -D_GNU_SOURCE "
+
+CFLAGS += " -I${STAGING_INCDIR} -I${STAGING_INCDIR}/libnl3"
# overide LDFLAGS to allow criu to build without: "x86_64-poky-linux-ld: unrecognized option '-Wl,-O1'"
export LDFLAGS=""
@@ -51,9 +54,12 @@ export HOST_SYS
inherit setuptools
+PACKAGECONFIG ??= ""
+PACKAGECONFIG[selinux] = ",,libselinux"
+
do_compile_prepend() {
- rm -rf ${S}/protobuf/google/protobuf/descriptor.proto
- ln -s ${PKG_CONFIG_SYSROOT_DIR}/usr/include/google/protobuf/descriptor.proto ${S}/protobuf/google/protobuf/descriptor.proto
+ rm -rf ${S}/images/google/protobuf/descriptor.proto
+ ln -s ${PKG_CONFIG_SYSROOT_DIR}/usr/include/google/protobuf/descriptor.proto ${S}/images/google/protobuf/descriptor.proto
}
do_compile () {
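The new PACKAGECONFIG knob in the hunks above gates both the libselinux build dependency and whether disable-selinux.patch is added to SRC_URI. A hedged example of turning SELinux support on from configuration; the conf file location and the underscore-override syntax of this era are the only assumptions:

    # e.g. in local.conf or a distro .conf -- illustration only
    PACKAGECONFIG_append_pn-criu = " selinux"

With the knob unset (the ??= "" default above), bb.utils.contains selects the patch and CRIU builds without linking libselinux, which silences the build-deps QA warning described in disable-selinux.patch below.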
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
index 28d638b31..a72140500 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
@@ -1,48 +1,39 @@
-From cb9933dc34af0b4d52c4584332600114ac65c402 Mon Sep 17 00:00:00 2001
+From 7ebde06e00b591a88397dad74a1aa47fd562eb50 Mon Sep 17 00:00:00 2001
From: Jianchuan Wang <jianchuan.wang@windriver.com>
-Date: Tue, 4 Aug 2015 17:45:51 +0800
-Subject: [PATCH] criu: Change libraries install directory
+Date: Tue, 16 Aug 2016 09:48:08 +0800
+Subject: [PATCH 1/2] criu: Change libraries install directory
-Install the libraries into /usr/lib(/usr/lib64)
+Install the libraries into /usr/lib(or /usr/lib64)
Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
---
- Makefile | 2 +-
- Makefile.inc | 9 ---------
- 2 files changed, 1 insertion(+), 10 deletions(-)
+ Makefile.install | 13 -------------
+ 1 file changed, 13 deletions(-)
-diff --git a/Makefile b/Makefile
-index 7f5c890..6dbc436 100644
---- a/Makefile
-+++ b/Makefile
-@@ -351,7 +351,7 @@ install-man:
+diff --git a/Makefile.install b/Makefile.install
+index dbc22e1..a30dc96 100644
+--- a/Makefile.install
++++ b/Makefile.install
+@@ -11,19 +11,6 @@ LIBDIR ?= $(PREFIX)/lib
+ INCLUDEDIR ?= $(PREFIX)/include/criu
+ LIBEXECDIR ?= $(PREFIX)/libexec
- install-crit: crit
- $(E) " INSTALL crit"
-- $(Q) python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX)
-+ $(Q) python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX) --install-lib=$(LIBDIR)
-
- .PHONY: install install-man install-crit install-criu
-
-diff --git a/Makefile.inc b/Makefile.inc
-index 5496f41..ba70aea 100644
---- a/Makefile.inc
-+++ b/Makefile.inc
-@@ -17,14 +17,5 @@ MANDIR := $(PREFIX)/share/man
- SYSTEMDUNITDIR := $(PREFIX)/lib/systemd/system/
- LOGROTATEDIR := $(PREFIX)/etc/logrotate.d/
- LIBDIR := $(PREFIX)/lib
--# For recent Debian/Ubuntu with multiarch support
--DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture \
-- -qDEB_HOST_MULTIARCH 2>/dev/null)
+-#
+-# For recent Debian/Ubuntu with multiarch support.
+-DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture -qDEB_HOST_MULTIARCH 2>/dev/null)
-ifneq "$(DEB_HOST_MULTIARCH)" ""
--LIBDIR := $(PREFIX)/lib/$(DEB_HOST_MULTIARCH)
--# For most other systems
--else ifeq "$(shell uname -m)" "x86_64"
--LIBDIR := $(PREFIX)/lib64
+- LIBDIR ?= $(PREFIX)/lib/$(DEB_HOST_MULTIARCH)
+-else
+- #
+- # For most other systems
+- ifeq "$(shell uname -m)" "x86_64"
+- LIBDIR ?= $(PREFIX)/lib64
+- endif
-endif
+-
+ export BINDIR SBINDIR MANDIR SYSTEMDUNITDIR LOGROTATEDIR
+ export INCLUDEDIR LIBDIR DESTDIR PREFIX LIBEXECDIR
- INCLUDEDIR := $(PREFIX)/include/criu
--
-1.9.1
+2.7.4
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
index 2fabe0adf..d30f2ac2c 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
@@ -1,46 +1,112 @@
-From 3d4f112fdb434712eba09239a468842323f1af4c Mon Sep 17 00:00:00 2001
-From: Yang Shi <yang.shi@windriver.com>
-Date: Tue, 26 Aug 2014 14:42:42 -0700
-Subject: [PATCH 1/2] criu: Fix toolchain hardcode
+From 057d30f15e81dcc4162d6fbee06f126564596397 Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Wed, 7 Sep 2016 23:55:15 -0400
+Subject: [PATCH] criu: Fix toolchain hardcode
Replace ":=" to "?=" so that the toolchain used by bitbake build system will
be taken.
Signed-off-by: Yang Shi <yang.shi@windriver.com>
-Signed-off-by: Nam Ninh <nam.ninh@windriver.com>
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
---
- Makefile | 18 +++++++++---------
- 1 file changed, 9 insertions(+), 9 deletions(-)
+ Makefile | 2 +-
+ criu/pie/Makefile | 2 +-
+ scripts/nmk/scripts/include.mk | 2 +-
+ scripts/nmk/scripts/tools.mk | 40 ++++++++++++++++++++--------------------
+ 4 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/Makefile b/Makefile
-index f1c8784..43252ec 100644
+index 52cbd6a..f66279b 100644
--- a/Makefile
+++ b/Makefile
-@@ -23,15 +23,15 @@ export VERSION_SO_MAJOR VERSION_SO_MINOR
- # Common definitions
+@@ -60,7 +60,7 @@ LDARCH ?= $(SRCARCH)
+
+ export SRCARCH LDARCH VDSO
+
+-UNAME-M := $(shell uname -m)
++UNAME-M ?= $(shell uname -m)
+ export UNAME-M
+
+ ifeq ($(ARCH),arm)
+diff --git a/criu/pie/Makefile b/criu/pie/Makefile
+index 125b02f..9975871 100644
+--- a/criu/pie/Makefile
++++ b/criu/pie/Makefile
+@@ -17,7 +17,7 @@ restorer-obj-e += ./$(ARCH_DIR)/syscalls.built-in.o
#
+ CFLAGS := $(filter-out -pg $(CFLAGS-GCOV),$(CFLAGS))
+ CFLAGS += -iquote $(SRC_DIR)/criu/pie/piegen
+-CFLAGS += -iquote $(SRC_DIR)/criu/arch/$(ARCH)/include
++CFLAGS += -iquote $(SRC_DIR)/criu/arch/$(SRCARCH)/include
+ CFLAGS += -iquote $(SRC_DIR)/criu/include
+ CFLAGS += -iquote $(SRC_DIR)
--FIND := find
--CSCOPE := cscope
+diff --git a/scripts/nmk/scripts/include.mk b/scripts/nmk/scripts/include.mk
+index 4c496f7..a7250cd 100644
+--- a/scripts/nmk/scripts/include.mk
++++ b/scripts/nmk/scripts/include.mk
+@@ -20,7 +20,7 @@ SUBARCH := $(shell uname -m | sed \
+ -e s/aarch64.*/arm64/)
+
+ ARCH ?= $(SUBARCH)
+-SRCARCH := $(ARCH)
++SRCARCH ?= $(ARCH)
+
+ export SUBARCH ARCH SRCARCH
+
+diff --git a/scripts/nmk/scripts/tools.mk b/scripts/nmk/scripts/tools.mk
+index 0538dde..e4af068 100644
+--- a/scripts/nmk/scripts/tools.mk
++++ b/scripts/nmk/scripts/tools.mk
+@@ -2,28 +2,28 @@ ifndef ____nmk_defined__tools
+
+ #
+ # System tools shorthands
-RM := rm -f
-LD := $(CROSS_COMPILE)ld
-CC := $(CROSS_COMPILE)gcc
+-CPP := $(CC) -E
+-AS := $(CROSS_COMPILE)as
+-AR := $(CROSS_COMPILE)ar
+-STRIP := $(CROSS_COMPILE)strip
+-OBJCOPY := $(CROSS_COMPILE)objcopy
+-OBJDUMP := $(CROSS_COMPILE)objdump
-NM := $(CROSS_COMPILE)nm
--SH := bash
-MAKE := make
--OBJCOPY := $(CROSS_COMPILE)objcopy
-+FIND ?= find
-+CSCOPE ?= cscope
+-MKDIR := mkdir -p
+-AWK := awk
+-PERL := perl
+-PYTHON := python
+-FIND := find
+-SH := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+RM ?= rm -f
+LD ?= $(CROSS_COMPILE)ld
+CC ?= $(CROSS_COMPILE)gcc
++CPP ?= $(CC) -E
++AS ?= $(CROSS_COMPILE)as
++AR ?= $(CROSS_COMPILE)ar
++STRIP ?= $(CROSS_COMPILE)strip
++OBJCOPY ?= $(CROSS_COMPILE)objcopy
++OBJDUMP ?= $(CROSS_COMPILE)objdump
+NM ?= $(CROSS_COMPILE)nm
-+SH ?= bash
+MAKE ?= make
-+OBJCOPY ?= $(CROSS_COMPILE)objcopy
-
- CFLAGS += $(USERCFLAGS)
++MKDIR ?= mkdir -p
++AWK ?= awk
++PERL ?= perl
++PYTHON ?= python
++FIND ?= find
++SH ?= $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ else if [ -x /bin/bash ]; then echo /bin/bash; \
+ else echo sh; fi ; fi)
+-CSCOPE := cscope
+-ETAGS := etags
+-CTAGS := ctags
++CSCOPE ?= cscope
++ETAGS ?= etags
++CTAGS ?= ctags
+ export RM LD CC CPP AS AR STRIP OBJCOPY OBJDUMP
+ export NM SH MAKE MKDIR AWK PERL PYTHON SH CSCOPE
--
-2.0.2
+2.8.1
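The ':=' to '?=' change in this patch matters because GNU make's ':=' assigns unconditionally, clobbering the CC, LD, NM, etc. that bitbake exports into the environment, while '?=' assigns only when the variable is still unset. A throwaway shell demonstration; the file name, variable, and toolchain prefix are hypothetical:

    printf 'CROSS_NM ?= nm\nall:\n\t@echo $(CROSS_NM)\n' > demo.mk
    CROSS_NM=arm-poky-linux-gnueabi-nm make -f demo.mk   # environment wins: cross nm
    make -f demo.mk                                      # unset, so the fallback: nm

With ':=' the first invocation would print plain "nm" and the cross build would silently use the host tool, which is exactly the hardcode this patch removes.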
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch
deleted file mode 100644
index b6fbf0176..000000000
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 81bc5928cdc1b432656eb6590967306d8cf3ac9d Mon Sep 17 00:00:00 2001
-From: Jianchuan Wang <jianchuan.wang@windriver.com>
-Date: Tue, 4 Aug 2015 10:22:21 +0800
-Subject: [PATCH] protobuf-c: Remove the rules which depend on the native
- command
-
-Those rules are not for cross-compile since the command protoc-c/cxx-generate-packed-data
-need be executed to generate some local files in the compiling processing.
-
-Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
----
- Makefile.am | 13 +++++++------
- 1 file changed, 7 insertions(+), 6 deletions(-)
-
-diff --git a/Makefile.am b/Makefile.am
-index 310aa09..0602e96 100644
---- a/Makefile.am
-+++ b/Makefile.am
-@@ -148,17 +148,18 @@ t_generated_code2_cxx_generate_packed_data_CXXFLAGS = \
- t_generated_code2_cxx_generate_packed_data_LDADD = \
- $(protobuf_LIBS)
-
--t/test.pb-c.c t/test.pb-c.h: $(top_builddir)/protoc-c/protoc-c$(EXEEXT) $(top_srcdir)/t/test.proto
-- $(AM_V_GEN)$(top_builddir)/protoc-c/protoc-c$(EXEEXT) -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test.proto
-+t/test.pb-c.c t/test.pb-c.h: $(top_srcdir)/t/test.proto
-+ $(AM_V_GEN)protoc-c -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test.proto
-
--t/test-full.pb-c.c t/test-full.pb-c.h: $(top_builddir)/protoc-c/protoc-c$(EXEEXT) $(top_srcdir)/t/test-full.proto
-- $(AM_V_GEN)$(top_builddir)/protoc-c/protoc-c$(EXEEXT) -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
-+t/test-full.pb-c.c t/test-full.pb-c.h: $(top_srcdir)/t/test-full.proto
-+ $(AM_V_GEN)protoc-c -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
-
- t/test-full.pb.cc t/test-full.pb.h: @PROTOC@ $(top_srcdir)/t/test-full.proto
- $(AM_V_GEN)@PROTOC@ -I$(top_srcdir) --cpp_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
-
--t/generated-code2/test-full-cxx-output.inc: t/generated-code2/cxx-generate-packed-data$(EXEEXT)
-- $(AM_V_GEN)$(top_builddir)/t/generated-code2/cxx-generate-packed-data$(EXEEXT) > $(top_builddir)/t/generated-code2/test-full-cxx-output.inc
-+t/generated-code2/test-full-cxx-output.inc:
-+ mkdir -p $(top_builddir)/t/generated-code2
-+ $(AM_V_GEN)cxx-generate-packed-data > $(top_builddir)/t/generated-code2/test-full-cxx-output.inc
-
- BUILT_SOURCES += \
- t/test.pb-c.c t/test.pb-c.h \
---
-1.9.1
-
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
index eaf816031..ba414d966 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
@@ -1,29 +1,28 @@
-From e9c2a94b9eb37ad24672b10caa398bd18282b962 Mon Sep 17 00:00:00 2001
-From: Yang Shi <yang.shi@windriver.com>
-Date: Tue, 26 Aug 2014 14:44:51 -0700
+From 07d9b3d0c372e45127dd51781d9564e8bee90dbe Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Tue, 16 Aug 2016 09:42:24 +0800
Subject: [PATCH 2/2] criu: Skip documentation install
asciidoc is needed to generate CRIU documentation, so skip it in install.
-Signed-off-by: Yang Shi <yang.shi@windriver.com>
-Signed-off-by: Nam Ninh <nam.ninh@windriver.com>
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
---
- Makefile | 2 +-
+ Makefile.install | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/Makefile b/Makefile
-index 43252ec..e25edcc 100644
---- a/Makefile
-+++ b/Makefile
-@@ -265,7 +265,7 @@ install: $(PROGRAM) install-man
- $(Q) install -m 644 scripts/logrotate.d/criu-service $(DESTDIR)$(LOGROTATEDIR)
+diff --git a/Makefile.install b/Makefile.install
+index a30dc96..33143fb 100644
+--- a/Makefile.install
++++ b/Makefile.install
+@@ -22,7 +22,7 @@ install-tree:
+ .PHONY: install-tree
install-man:
- $(Q) $(MAKE) -C Documentation install
+# $(Q) $(MAKE) -C Documentation install
+ .PHONY: install-man
- .PHONY: install install-man
-
+ install-lib: lib
--
-2.0.2
+2.7.4
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch
deleted file mode 100644
index ef60fc018..000000000
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From f8b7c90f6da90b67bdd7d5301894c5c28bd9d076 Mon Sep 17 00:00:00 2001
-From: Jianchuan Wang <jianchuan.wang@windriver.com>
-Date: Mon, 10 Aug 2015 11:23:31 +0800
-Subject: [PATCH] Omit google-apputils dependency
-
-Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
----
- python/setup.py | 1 -
- 1 file changed, 1 deletion(-)
-
-diff --git a/python/setup.py b/python/setup.py
-index 2450a77..6f6bffb 100755
---- a/python/setup.py
-+++ b/python/setup.py
-@@ -189,7 +189,6 @@ if __name__ == '__main__':
- 'google.protobuf.text_format'],
- cmdclass = { 'clean': clean, 'build_py': build_py },
- install_requires = ['setuptools'],
-- setup_requires = ['google-apputils'],
- ext_modules = ext_module_list,
- url = 'https://developers.google.com/protocol-buffers/',
- maintainer = maintainer_email,
---
-1.9.1
-
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch
new file mode 100644
index 000000000..da881dd37
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/disable-selinux.patch
@@ -0,0 +1,26 @@
+Upstream-Status: Inappropriate [disable feature]
+
+It shows warning when build crius if libselinux has been built already:
+
+ WARNING: QA Issue: criu rdepends on libselinux, but it isn't a build dependency? [build-deps]
+
+Apply this patch to disable selinux support when 'selinux' is not in PACKAGECONF.
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+
+diff --git a/Makefile.config b/Makefile.config
+index ce4b8d8..3ac2780 100644
+--- a/Makefile.config
++++ b/Makefile.config
+@@ -8,11 +8,6 @@ ifeq ($(call try-cc,$(LIBBSD_DEV_TEST),-lbsd),y)
+ DEFINES += -DCONFIG_HAS_LIBBSD
+ endif
+
+-ifeq ($(call pkg-config-check,libselinux),y)
+- LIBS := -lselinux $(LIBS)
+- DEFINES += -DCONFIG_HAS_SELINUX
+-endif
+-
+ $(CONFIG): scripts/utilities.mak scripts/feature-tests.mak include/config-base.h
+ $(E) " GEN " $@
+ $(Q) @echo '#ifndef __CR_CONFIG_H__' > $@
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb b/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb
index 032044073..e94e98581 100644
--- a/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb
@@ -73,14 +73,14 @@ RDEPENDS_${PN} += "\
inherit setuptools systemd
-SYSTEMD_PACKAGES = "${@base_contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
-SYSTEMD_SERVICE_${PN} = "${@base_contains('DISTRO_FEATURES','systemd','docker-registry.service','',d)}"
+SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+SYSTEMD_SERVICE_${PN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','docker-registry.service','',d)}"
do_install_append() {
mkdir -p ${D}/etc/default/
cp ${WORKDIR}/docker-registry.conf ${D}/etc/default/docker-registry
- if ${@base_contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+ if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
install -d ${D}${systemd_unitdir}/system
install -m 644 ${WORKDIR}/docker-registry.service ${D}/${systemd_unitdir}/system
sed -i "s|#WORKDIR#|${PYTHON_SITEPACKAGES_DIR}/docker_registry|" ${D}/${systemd_unitdir}/system/docker-registry.service
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
index 42a336e5f..b0fda3502 100644
--- a/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
+++ b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
@@ -9,35 +9,34 @@ DESCRIPTION = "Linux container runtime \
large-scale web deployments, database clusters, continuous deployment \
systems, private PaaS, service-oriented architectures, etc. \
. \
- This package contains the daemon and client. Using docker.io on non-amd64 \
- hosts is not supported at this time. Please be careful when using it \
- on anything besides amd64. \
+ This package contains the daemon and client. Using docker.io is \
+ officially supported on x86_64 and arm (32-bit) hosts. \
+ Other architectures are considered experimental. \
. \
- Also, note that kernel version 3.8 or above is required for proper \
+ Also, note that kernel version 3.10 or above is required for proper \
operation of the daemon process, and that any lower versions may have \
subtle and/or glaring issues. \
"
-SRCREV = "76d6bc9a9f1690e16f3721ba165364688b626de2"
+SRCREV = "7392c3b0ce0f9d3e918a321c66668c5d1ef4f689"
SRC_URI = "\
git://github.com/docker/docker.git;nobranch=1 \
file://docker.service \
file://docker.init \
file://hi.Dockerfile \
- file://disable_sha1sum_startup.patch \
- file://Bump-bolt-to-v1.1.0.patch \
"
# Apache-2.0 for docker
LICENSE = "Apache-2.0"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=cc2221abf0b96ea39dd68141b70f7937"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=aadc30f9c14d876ded7bedc0afd2d3d7"
S = "${WORKDIR}/git"
-DOCKER_VERSION = "1.9.0"
+DOCKER_VERSION = "1.12.5"
PV = "${DOCKER_VERSION}+git${SRCREV}"
-DEPENDS = "go-cross \
+DEPENDS = " \
+ go-cross \
go-cli \
go-pty \
go-context \
@@ -55,7 +54,10 @@ DEPENDS = "go-cross \
"
DEPENDS_append_class-target = "lvm2"
-RDEPENDS_${PN} = "curl aufs-util git cgroup-lite util-linux iptables"
+RDEPENDS_${PN} = "curl aufs-util git util-linux iptables \
+ ${@bb.utils.contains('DISTRO_FEATURES','systemd','','cgroup-lite',d)} \
+ "
+RDEPENDS_${PN} += "containerd runc"
RRECOMMENDS_${PN} = "lxc docker-registry rt-tests"
RRECOMMENDS_${PN} += " kernel-module-dm-thin-pool kernel-module-nf-nat"
DOCKER_PKG="github.com/docker/docker"
@@ -63,14 +65,29 @@ DOCKER_PKG="github.com/docker/docker"
do_configure[noexec] = "1"
do_compile() {
- export GOARCH="${TARGET_ARCH}"
- # supported amd64, 386, arm arm64
- if [ "${TARGET_ARCH}" = "x86_64" ]; then
- export GOARCH="amd64"
- fi
- if [ "${TARGET_ARCH}" = "aarch64" ]; then
- export GOARCH="arm64"
- fi
+ case "${TARGET_ARCH}" in
+ arm)
+ GOARCH=arm
+ case "${TUNE_PKGARCH}" in
+ cortexa*)
+ export GOARM=7
+ ;;
+ esac
+ ;;
+ aarch64)
+ GOARCH=arm64
+ ;;
+ i586|i686)
+ GOARCH=386
+ ;;
+ x86_64)
+ GOARCH=amd64
+ ;;
+ *)
+ GOARCH="${TARGET_ARCH}"
+ ;;
+ esac
+ export GOARCH
# Set GOPATH. See 'PACKAGERS.md'. Don't rely on
# docker to download its dependencies but rather
@@ -80,13 +97,14 @@ do_compile() {
mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ export GOROOT="${STAGING_DIR_NATIVE}/${nonarch_libdir}/${HOST_SYS}/go"
cd -
# Pass the needed cflags/ldflags so that cgo
# can find the needed headers files and libraries
export CGO_ENABLED="1"
- export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
- export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
# in order to exclude devicemapper and btrfs - https://github.com/docker/docker/issues/14056
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
@@ -99,28 +117,27 @@ do_compile() {
inherit systemd update-rc.d
-SYSTEMD_PACKAGES = "${@base_contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
-SYSTEMD_SERVICE_${PN} = "${@base_contains('DISTRO_FEATURES','systemd','docker.service','',d)}"
+SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+SYSTEMD_SERVICE_${PN} = "${@bb.utils.contains('DISTRO_FEATURES','systemd','docker.service','',d)}"
-INITSCRIPT_PACKAGES += "${@base_contains('DISTRO_FEATURES','sysvinit','${PN}','',d)}"
-INITSCRIPT_NAME_${PN} = "${@base_contains('DISTRO_FEATURES','sysvinit','docker.init','',d)}"
+INITSCRIPT_PACKAGES += "${@bb.utils.contains('DISTRO_FEATURES','sysvinit','${PN}','',d)}"
+INITSCRIPT_NAME_${PN} = "${@bb.utils.contains('DISTRO_FEATURES','sysvinit','docker.init','',d)}"
INITSCRIPT_PARAMS_${PN} = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
do_install() {
mkdir -p ${D}/${bindir}
- cp ${S}/bundles/${DOCKER_VERSION}/dynbinary/docker-${DOCKER_VERSION} \
- ${D}/${bindir}/docker
- cp ${S}/bundles/${DOCKER_VERSION}/dynbinary/dockerinit-${DOCKER_VERSION} \
- ${D}/${bindir}/dockerinit
+ cp ${S}/bundles/latest/dynbinary-client/docker ${D}/${bindir}/docker
+ cp ${S}/bundles/latest/dynbinary-daemon/dockerd ${D}/${bindir}/dockerd
+ cp ${S}/bundles/latest/dynbinary-daemon/docker-proxy ${D}/${bindir}/docker-proxy
- if ${@base_contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+ if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
install -d ${D}${systemd_unitdir}/system
install -m 644 ${S}/contrib/init/systemd/docker.* ${D}/${systemd_unitdir}/system
# replaces one copied from above with one that uses the local registry for a mirror
install -m 644 ${WORKDIR}/docker.service ${D}/${systemd_unitdir}/system
- else
- install -d ${D}${sysconfdir}/init.d
- install -m 0755 ${WORKDIR}/docker.init ${D}${sysconfdir}/init.d/docker.init
+ else
+ install -d ${D}${sysconfdir}/init.d
+ install -m 0755 ${WORKDIR}/docker.init ${D}${sysconfdir}/init.d/docker.init
fi
mkdir -p ${D}/usr/share/docker/
@@ -133,15 +150,5 @@ GROUPADD_PARAM_${PN} = "-r docker"
FILES_${PN} += "/lib/systemd/system/*"
-# DO NOT STRIP docker and dockerinit!!!
-#
-# Reason:
-# The "docker" package contains two binaries: "docker" and "dockerinit",
-# which are both written in Go. The "dockerinit" package is built first,
-# then its checksum is given to the build process compiling the "docker"
-# binary. Hence the checksum of the unstripped "dockerinit" binary is hard
-# coded into the "docker" binary. At runtime the "docker" binary invokes
-# the "dockerinit" binary, but before doing that it ensures the checksum
-# of "dockerinit" matches with the hard coded value.
-#
+# DO NOT STRIP docker
INHIBIT_PACKAGE_STRIP = "1"
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch b/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch
deleted file mode 100644
index ca4ad812f..000000000
--- a/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch
+++ /dev/null
@@ -1,1828 +0,0 @@
-From a41917c2c88bd7f694d141ac67f4a194aaa16fa1 Mon Sep 17 00:00:00 2001
-From: Qiang Huang <h.huangqiang@huawei.com>
-Date: Wed, 28 Oct 2015 08:49:45 +0800
-Subject: [PATCH] Bump bolt to v1.1.0
-
-It adds ARM64, ppc64le, s390x, solaris support, and a bunch of
-bugfixs.
-
-Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
----
- hack/vendor.sh | 2 +-
- vendor/src/github.com/boltdb/bolt/.gitignore | 1 +
- vendor/src/github.com/boltdb/bolt/README.md | 250 +++++++++++++++++++--
- vendor/src/github.com/boltdb/bolt/batch.go | 138 ++++++++++++
- vendor/src/github.com/boltdb/bolt/bolt_386.go | 5 +-
- vendor/src/github.com/boltdb/bolt/bolt_amd64.go | 3 +
- vendor/src/github.com/boltdb/bolt/bolt_arm.go | 5 +-
- vendor/src/github.com/boltdb/bolt/bolt_arm64.go | 9 +
- vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go | 9 +
- vendor/src/github.com/boltdb/bolt/bolt_s390x.go | 9 +
- vendor/src/github.com/boltdb/bolt/bolt_unix.go | 37 ++-
- .../github.com/boltdb/bolt/bolt_unix_solaris.go | 101 +++++++++
- vendor/src/github.com/boltdb/bolt/bolt_windows.go | 10 +-
- vendor/src/github.com/boltdb/bolt/bucket.go | 29 ++-
- vendor/src/github.com/boltdb/bolt/cursor.go | 12 +-
- vendor/src/github.com/boltdb/bolt/db.go | 195 ++++++++++++----
- vendor/src/github.com/boltdb/bolt/errors.go | 4 +
- vendor/src/github.com/boltdb/bolt/freelist.go | 28 ++-
- vendor/src/github.com/boltdb/bolt/node.go | 36 ++-
- vendor/src/github.com/boltdb/bolt/page.go | 45 +++-
- vendor/src/github.com/boltdb/bolt/tx.go | 80 +++++--
- 21 files changed, 886 insertions(+), 122 deletions(-)
- create mode 100644 vendor/src/github.com/boltdb/bolt/batch.go
- create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_arm64.go
- create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
- create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_s390x.go
- create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
-
-diff --git a/hack/vendor.sh b/hack/vendor.sh
-index d872d4a..c28e677 100755
---- a/hack/vendor.sh
-+++ b/hack/vendor.sh
-@@ -36,7 +36,7 @@ clone git github.com/coreos/etcd v2.2.0
- fix_rewritten_imports github.com/coreos/etcd
- clone git github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec
- clone git github.com/hashicorp/consul v0.5.2
--clone git github.com/boltdb/bolt v1.0
-+clone git github.com/boltdb/bolt v1.1.0
-
- # get graph and distribution packages
- clone git github.com/docker/distribution 20c4b7a1805a52753dfd593ee1cc35558722a0ce # docker/1.9 branch
-diff --git a/vendor/src/github.com/boltdb/bolt/.gitignore b/vendor/src/github.com/boltdb/bolt/.gitignore
-index b2bb382..c7bd2b7 100644
---- a/vendor/src/github.com/boltdb/bolt/.gitignore
-+++ b/vendor/src/github.com/boltdb/bolt/.gitignore
-@@ -1,3 +1,4 @@
- *.prof
- *.test
-+*.swp
- /bin/
-diff --git a/vendor/src/github.com/boltdb/bolt/README.md b/vendor/src/github.com/boltdb/bolt/README.md
-index 727e977..0a33ebc 100644
---- a/vendor/src/github.com/boltdb/bolt/README.md
-+++ b/vendor/src/github.com/boltdb/bolt/README.md
-@@ -16,7 +16,7 @@ and setting values. That's it.
-
- ## Project Status
-
--Bolt is stable and the API is fixed. Full unit test coverage and randomized
-+Bolt is stable and the API is fixed. Full unit test coverage and randomized
- black box testing are used to ensure database consistency and thread safety.
- Bolt is currently in high-load production environments serving databases as
- large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
-@@ -87,6 +87,11 @@ are not thread safe. To work with data in multiple goroutines you must start
- a transaction for each one or use locking to ensure only one goroutine accesses
- a transaction at a time. Creating transaction from the `DB` is thread safe.
-
-+Read-only transactions and read-write transactions should not depend on one
-+another and generally shouldn't be opened simultaneously in the same goroutine.
-+This can cause a deadlock as the read-write transaction needs to periodically
-+re-map the data file but it cannot do so while a read-only transaction is open.
-+
-
- #### Read-write transactions
-
-@@ -120,12 +125,88 @@ err := db.View(func(tx *bolt.Tx) error {
- })
- ```
-
--You also get a consistent view of the database within this closure, however,
-+You also get a consistent view of the database within this closure, however,
- no mutating operations are allowed within a read-only transaction. You can only
- retrieve buckets, retrieve values, and copy the database within a read-only
- transaction.
-
-
-+#### Batch read-write transactions
-+
-+Each `DB.Update()` waits for disk to commit the writes. This overhead
-+can be minimized by combining multiple updates with the `DB.Batch()`
-+function:
-+
-+```go
-+err := db.Batch(func(tx *bolt.Tx) error {
-+ ...
-+ return nil
-+})
-+```
-+
-+Concurrent Batch calls are opportunistically combined into larger
-+transactions. Batch is only useful when there are multiple goroutines
-+calling it.
-+
-+The trade-off is that `Batch` can call the given
-+function multiple times, if parts of the transaction fail. The
-+function must be idempotent and side effects must take effect only
-+after a successful return from `DB.Batch()`.
-+
-+For example: don't display messages from inside the function, instead
-+set variables in the enclosing scope:
-+
-+```go
-+var id uint64
-+err := db.Batch(func(tx *bolt.Tx) error {
-+ // Find last key in bucket, decode as bigendian uint64, increment
-+ // by one, encode back to []byte, and add new key.
-+ ...
-+ id = newValue
-+ return nil
-+})
-+if err != nil {
-+ return ...
-+}
-+fmt.Println("Allocated ID %d", id)
-+```
-+
-+
-+#### Managing transactions manually
-+
-+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
-+function. These helper functions will start the transaction, execute a function,
-+and then safely close your transaction if an error is returned. This is the
-+recommended way to use Bolt transactions.
-+
-+However, sometimes you may want to manually start and end your transactions.
-+You can use the `Tx.Begin()` function directly but _please_ be sure to close the
-+transaction.
-+
-+```go
-+// Start a writable transaction.
-+tx, err := db.Begin(true)
-+if err != nil {
-+ return err
-+}
-+defer tx.Rollback()
-+
-+// Use the transaction...
-+_, err := tx.CreateBucket([]byte("MyBucket"))
-+if err != nil {
-+ return err
-+}
-+
-+// Commit the transaction and check for error.
-+if err := tx.Commit(); err != nil {
-+ return err
-+}
-+```
-+
-+The first argument to `DB.Begin()` is a boolean stating if the transaction
-+should be writable.
-+
-+
- ### Using buckets
-
- Buckets are collections of key/value pairs within the database. All keys in a
-@@ -175,13 +256,61 @@ db.View(func(tx *bolt.Tx) error {
- ```
-
- The `Get()` function does not return an error because its operation is
--guarenteed to work (unless there is some kind of system failure). If the key
-+guaranteed to work (unless there is some kind of system failure). If the key
- exists then it will return its byte slice value. If it doesn't exist then it
- will return `nil`. It's important to note that you can have a zero-length value
- set to a key which is different than the key not existing.
-
- Use the `Bucket.Delete()` function to delete a key from the bucket.
-
-+Please note that values returned from `Get()` are only valid while the
-+transaction is open. If you need to use a value outside of the transaction
-+then you must use `copy()` to copy it to another byte slice.
-+
-+
-+### Autoincrementing integer for the bucket
-+By using the NextSequence() function, you can let Bolt determine a sequence
-+which can be used as the unique identifier for your key/value pairs. See the
-+example below.
-+
-+```go
-+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
-+func (s *Store) CreateUser(u *User) error {
-+ return s.db.Update(func(tx *bolt.Tx) error {
-+ // Retrieve the users bucket.
-+ // This should be created when the DB is first opened.
-+ b := tx.Bucket([]byte("users"))
-+
-+ // Generate ID for the user.
-+ // This returns an error only if the Tx is closed or not writeable.
-+ // That can't happen in an Update() call so I ignore the error check.
-+ id, _ = b.NextSequence()
-+ u.ID = int(id)
-+
-+ // Marshal user data into bytes.
-+ buf, err := json.Marshal(u)
-+ if err != nil {
-+ return err
-+ }
-+
-+ // Persist bytes to users bucket.
-+ return b.Put(itob(u.ID), buf)
-+ })
-+}
-+
-+// itob returns an 8-byte big endian representation of v.
-+func itob(v int) []byte {
-+ b := make([]byte, 8)
-+ binary.BigEndian.PutUint64(b, uint64(v))
-+ return b
-+}
-+
-+type User struct {
-+ ID int
-+ ...
-+}
-+
-+```
-
- ### Iterating over keys
-
-@@ -254,7 +383,7 @@ db.View(func(tx *bolt.Tx) error {
- max := []byte("2000-01-01T00:00:00Z")
-
- // Iterate over the 90's.
-- for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) != -1; k, v = c.Next() {
-+ for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
- fmt.Printf("%s: %s\n", k, v)
- }
-
-@@ -294,7 +423,7 @@ func (*Bucket) DeleteBucket(key []byte) error
-
- ### Database backups
-
--Bolt is a single file so it's easy to backup. You can use the `Tx.Copy()`
-+Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
- function to write a consistent view of the database to a writer. If you call
- this from a read-only transaction, it will perform a hot backup and not block
- your other database reads and writes. It will also use `O_DIRECT` when available
-@@ -305,11 +434,12 @@ do database backups:
-
- ```go
- func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
-- err := db.View(func(tx bolt.Tx) error {
-+ err := db.View(func(tx *bolt.Tx) error {
- w.Header().Set("Content-Type", "application/octet-stream")
- w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
- w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
-- return tx.Copy(w)
-+ _, err := tx.WriteTo(w)
-+ return err
- })
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
-@@ -351,14 +481,13 @@ go func() {
- // Grab the current stats and diff them.
- stats := db.Stats()
- diff := stats.Sub(&prev)
--
-+
- // Encode stats to JSON and print to STDERR.
- json.NewEncoder(os.Stderr).Encode(diff)
-
- // Save stats for the next loop.
- prev = stats
- }
--}
- }()
- ```
-
-@@ -366,25 +495,83 @@ It's also useful to pipe these stats to a service such as statsd for monitoring
- or to provide an HTTP endpoint that will perform a fixed-length sample.
-
-
-+### Read-Only Mode
-+
-+Sometimes it is useful to create a shared, read-only Bolt database. To this,
-+set the `Options.ReadOnly` flag when opening your database. Read-only mode
-+uses a shared lock to allow multiple processes to read from the database but
-+it will block any processes from opening the database in read-write mode.
-+
-+```go
-+db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-+if err != nil {
-+ log.Fatal(err)
-+}
-+```
-+
-+
- ## Resources
-
- For more information on getting started with Bolt, check out the following articles:
-
- * [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-+
-+
-+## Comparison with other databases
-+
-+### Postgres, MySQL, & other relational databases
-+
-+Relational databases structure data into rows and are only accessible through
-+the use of SQL. This approach provides flexibility in how you store and query
-+your data but also incurs overhead in parsing and planning SQL statements. Bolt
-+accesses all data by a byte slice key. This makes Bolt fast to read and write
-+data by key but provides no built-in support for joining values together.
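-+
-+In practice every read is a direct lookup by key. A minimal sketch, reusing
-+the `itob` helper from the example above and assuming a "users" bucket
-+already exists:
-+
-+```go
-+db.View(func(tx *bolt.Tx) error {
-+	// One direct lookup by key; joining related records is left
-+	// to the application.
-+	v := tx.Bucket([]byte("users")).Get(itob(42))
-+	fmt.Printf("user 42: %s\n", v)
-+	return nil
-+})
-+```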
-+
-+Most relational databases (with the exception of SQLite) are standalone servers
-+that run separately from your application. This gives your systems
-+flexibility to connect multiple application servers to a single database
-+server but also adds overhead in serializing and transporting data over the
-+network. Bolt runs as a library included in your application so all data access
-+has to go through your application's process. This brings data closer to your
-+application but limits multi-process access to the data.
-+
-+
-+### LevelDB, RocksDB
-
-+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-+they are libraries bundled into the application; however, their underlying
-+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-+random writes by using a write-ahead log and multi-tiered, sorted files called
-+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-+have trade-offs.
-
-+If you require a high random write throughput (>10,000 w/sec) or you need to use
-+spinning disks then LevelDB could be a good choice. If your application is
-+read-heavy or does a lot of range scans then Bolt could be a good choice.
-
--## Comparing Bolt to LMDB
-+One other important consideration is that LevelDB does not have transactions.
-+It supports batch writing of key/value pairs and it supports read snapshots,
-+but it will not give you the ability to do a compare-and-swap operation safely.
-+Bolt supports fully serializable ACID transactions.
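-+
-+For example, a compare-and-swap can be written directly inside an update
-+transaction. A minimal sketch, assuming a pre-existing "config" bucket and
-+the usual `bytes` and `errors` imports:
-+
-+```go
-+err := db.Update(func(tx *bolt.Tx) error {
-+	b := tx.Bucket([]byte("config"))
-+	// The read and the conditional write run in one serializable
-+	// transaction, so no other writer can interleave between them.
-+	if v := b.Get([]byte("mode")); !bytes.Equal(v, []byte("old")) {
-+		return errors.New("value changed concurrently")
-+	}
-+	return b.Put([]byte("mode"), []byte("new"))
-+})
-+```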
-+
-+
-+### LMDB
-
- Bolt was originally a port of LMDB so it is architecturally similar. Both use
--a B+tree, have ACID semanetics with fully serializable transactions, and support
-+a B+tree, have ACID semantics with fully serializable transactions, and support
- lock-free MVCC using a single writer and multiple readers.
-
- The two projects have somewhat diverged. LMDB heavily focuses on raw performance
- while Bolt has focused on simplicity and ease of use. For example, LMDB allows
--several unsafe actions such as direct writes and append writes for the sake of
--performance. Bolt opts to disallow actions which can leave the database in a
--corrupted state. The only exception to this in Bolt is `DB.NoSync`.
-+several unsafe actions such as direct writes for the sake of performance. Bolt
-+opts to disallow actions which can leave the database in a corrupted state. The
-+only exception to this in Bolt is `DB.NoSync`.
-+
-+There are also a few differences in API. LMDB requires a maximum mmap size when
-+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-+automatically. LMDB overloads the getter and setter functions with multiple
-+flags whereas Bolt splits these specialized cases into their own functions.
-
-
- ## Caveats & Limitations
-@@ -425,14 +612,33 @@ Here are a few things to note when evaluating and using Bolt:
- can in memory and will release memory as needed to other processes. This means
- that Bolt can show very high memory usage when working with large databases.
- However, this is expected and the OS will release memory as needed. Bolt can
-- handle databases much larger than the available physical RAM.
-+ handle databases much larger than the available physical RAM, provided its
-+ memory-map fits in the process virtual address space. It may be problematic
-+  on 32-bit systems.
-+
-+* The data structures in the Bolt database are memory mapped so the data file
-+ will be endian specific. This means that you cannot copy a Bolt file from a
-+ little endian machine to a big endian machine and have it work. For most
-+ users this is not a concern since most modern CPUs are little endian.
-+
-+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
-+ and return free pages back to the disk. Instead, Bolt maintains a free list
-+ of unused pages within its data file. These free pages can be reused by later
-+ transactions. This works well for many use cases as databases generally tend
-+ to grow. However, it's important to note that deleting large chunks of data
-+ will not allow you to reclaim that space on disk.
-+
-+ For more information on page allocation, [see this comment][page-allocation].
-+
-+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
-
-
- ## Other Projects Using Bolt
-
- Below is a list of public, open source projects that use Bolt:
-
--* [Bazil](https://github.com/bazillion/bazil) - A file system that lets your data reside where it is most convenient for it to reside.
-+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
- * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
- * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
- * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-@@ -450,6 +656,16 @@ Below is a list of public, open source projects that use Bolt:
- * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
- * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
- * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
-+* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
-+* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
-+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, offers a JSON over HTTP API, supports ISO 8601 duration notation, and handles dependent jobs.
-+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
-+ backed by boltdb.
-+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
-+ simple tx and key scans.
-
- If you are using Bolt in a project please send a pull request to add it to the list.
--
-diff --git a/vendor/src/github.com/boltdb/bolt/batch.go b/vendor/src/github.com/boltdb/bolt/batch.go
-new file mode 100644
-index 0000000..84acae6
---- /dev/null
-+++ b/vendor/src/github.com/boltdb/bolt/batch.go
-@@ -0,0 +1,138 @@
-+package bolt
-+
-+import (
-+ "errors"
-+ "fmt"
-+ "sync"
-+ "time"
-+)
-+
-+// Batch calls fn as part of a batch. It behaves similarly to Update,
-+// except:
-+//
-+// 1. concurrent Batch calls can be combined into a single Bolt
-+// transaction.
-+//
-+// 2. the function passed to Batch may be called multiple times,
-+// regardless of whether it returns an error or not.
-+//
-+// This means that Batch function side effects must be idempotent and
-+// take permanent effect only after a successful return is seen in the
-+// caller.
-+//
-+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-+// and DB.MaxBatchDelay, respectively.
-+//
-+// Batch is only useful when there are multiple goroutines calling it.
-+func (db *DB) Batch(fn func(*Tx) error) error {
-+ errCh := make(chan error, 1)
-+
-+ db.batchMu.Lock()
-+	if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize {
-+ // There is no existing batch, or the existing batch is full; start a new one.
-+ db.batch = &batch{
-+ db: db,
-+ }
-+ db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
-+ }
-+ db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
-+ if len(db.batch.calls) >= db.MaxBatchSize {
-+ // wake up batch, it's ready to run
-+ go db.batch.trigger()
-+ }
-+ db.batchMu.Unlock()
-+
-+ err := <-errCh
-+ if err == trySolo {
-+ err = db.Update(fn)
-+ }
-+ return err
-+}
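-+
-+// Illustrative call pattern (bucket, key and value names are hypothetical):
-+// many goroutines issuing small writes through Batch can share a single
-+// commit and fsync:
-+//
-+//	err := db.Batch(func(tx *Tx) error {
-+//		b, err := tx.CreateBucketIfNotExists([]byte("events"))
-+//		if err != nil {
-+//			return err
-+//		}
-+//		return b.Put(key, value) // key/value owned by the calling goroutine
-+//	})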
-+
-+type call struct {
-+ fn func(*Tx) error
-+ err chan<- error
-+}
-+
-+type batch struct {
-+ db *DB
-+ timer *time.Timer
-+ start sync.Once
-+ calls []call
-+}
-+
-+// trigger runs the batch if it hasn't already been run.
-+func (b *batch) trigger() {
-+ b.start.Do(b.run)
-+}
-+
-+// run performs the transactions in the batch and communicates results
-+// back to DB.Batch.
-+func (b *batch) run() {
-+ b.db.batchMu.Lock()
-+ b.timer.Stop()
-+ // Make sure no new work is added to this batch, but don't break
-+ // other batches.
-+ if b.db.batch == b {
-+ b.db.batch = nil
-+ }
-+ b.db.batchMu.Unlock()
-+
-+retry:
-+ for len(b.calls) > 0 {
-+ var failIdx = -1
-+ err := b.db.Update(func(tx *Tx) error {
-+ for i, c := range b.calls {
-+ if err := safelyCall(c.fn, tx); err != nil {
-+ failIdx = i
-+ return err
-+ }
-+ }
-+ return nil
-+ })
-+
-+ if failIdx >= 0 {
-+ // take the failing transaction out of the batch. it's
-+ // safe to shorten b.calls here because db.batch no longer
-+ // points to us, and we hold the mutex anyway.
-+ c := b.calls[failIdx]
-+ b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
-+			// tell the submitter to re-run it solo and continue with the rest of the batch
-+ c.err <- trySolo
-+ continue retry
-+ }
-+
-+ // pass success, or bolt internal errors, to all callers
-+ for _, c := range b.calls {
-+ if c.err != nil {
-+ c.err <- err
-+ }
-+ }
-+ break retry
-+ }
-+}
-+
-+// trySolo is a special sentinel error value used for signaling that a
-+// transaction function should be re-run. It should never be seen by
-+// callers.
-+var trySolo = errors.New("batch function returned an error and should be re-run solo")
-+
-+type panicked struct {
-+ reason interface{}
-+}
-+
-+func (p panicked) Error() string {
-+ if err, ok := p.reason.(error); ok {
-+ return err.Error()
-+ }
-+ return fmt.Sprintf("panic: %v", p.reason)
-+}
-+
-+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
-+ defer func() {
-+ if p := recover(); p != nil {
-+ err = panicked{p}
-+ }
-+ }()
-+ return fn(tx)
-+}
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_386.go b/vendor/src/github.com/boltdb/bolt/bolt_386.go
-index 856f401..e659bfb 100644
---- a/vendor/src/github.com/boltdb/bolt/bolt_386.go
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_386.go
-@@ -1,4 +1,7 @@
- package bolt
-
- // maxMapSize represents the largest mmap size supported by Bolt.
--const maxMapSize = 0xFFFFFFF // 256MB
-+const maxMapSize = 0x7FFFFFFF // 2GB
-+
-+// maxAllocSize is the size used when creating array pointers.
-+const maxAllocSize = 0xFFFFFFF
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
-index 4262932..cca6b7e 100644
---- a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
-@@ -2,3 +2,6 @@ package bolt
-
- // maxMapSize represents the largest mmap size supported by Bolt.
- const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-+
-+// maxAllocSize is the size used when creating array pointers.
-+const maxAllocSize = 0x7FFFFFFF
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm.go b/vendor/src/github.com/boltdb/bolt/bolt_arm.go
-index 856f401..e659bfb 100644
---- a/vendor/src/github.com/boltdb/bolt/bolt_arm.go
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_arm.go
-@@ -1,4 +1,7 @@
- package bolt
-
- // maxMapSize represents the largest mmap size supported by Bolt.
--const maxMapSize = 0xFFFFFFF // 256MB
-+const maxMapSize = 0x7FFFFFFF // 2GB
-+
-+// maxAllocSize is the size used when creating array pointers.
-+const maxAllocSize = 0xFFFFFFF
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm64.go b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go
-new file mode 100644
-index 0000000..6d23093
---- /dev/null
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go
-@@ -0,0 +1,9 @@
-+// +build arm64
-+
-+package bolt
-+
-+// maxMapSize represents the largest mmap size supported by Bolt.
-+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-+
-+// maxAllocSize is the size used when creating array pointers.
-+const maxAllocSize = 0x7FFFFFFF
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
-new file mode 100644
-index 0000000..8351e12
---- /dev/null
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
-@@ -0,0 +1,9 @@
-+// +build ppc64le
-+
-+package bolt
-+
-+// maxMapSize represents the largest mmap size supported by Bolt.
-+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-+
-+// maxAllocSize is the size used when creating array pointers.
-+const maxAllocSize = 0x7FFFFFFF
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_s390x.go b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go
-new file mode 100644
-index 0000000..f4dd26b
---- /dev/null
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go
-@@ -0,0 +1,9 @@
-+// +build s390x
-+
-+package bolt
-+
-+// maxMapSize represents the largest mmap size supported by Bolt.
-+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-+
-+// maxAllocSize is the size used when creating array pointers.
-+const maxAllocSize = 0x7FFFFFFF
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix.go b/vendor/src/github.com/boltdb/bolt/bolt_unix.go
-index 95647a7..6eef6b2 100644
---- a/vendor/src/github.com/boltdb/bolt/bolt_unix.go
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_unix.go
-@@ -1,8 +1,9 @@
--// +build !windows,!plan9
-+// +build !windows,!plan9,!solaris
-
- package bolt
-
- import (
-+ "fmt"
- "os"
- "syscall"
- "time"
-@@ -10,7 +11,7 @@ import (
- )
-
- // flock acquires an advisory lock on a file descriptor.
--func flock(f *os.File, timeout time.Duration) error {
-+func flock(f *os.File, exclusive bool, timeout time.Duration) error {
- var t time.Time
- for {
- // If we're beyond our timeout then return an error.
-@@ -20,9 +21,13 @@ func flock(f *os.File, timeout time.Duration) error {
- } else if timeout > 0 && time.Since(t) > timeout {
- return ErrTimeout
- }
-+ flag := syscall.LOCK_SH
-+ if exclusive {
-+ flag = syscall.LOCK_EX
-+ }
-
- 		// Otherwise attempt to obtain the lock.
-- err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
-+ err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
- if err == nil {
- return nil
- } else if err != syscall.EWOULDBLOCK {
-@@ -41,11 +46,28 @@ func funlock(f *os.File) error {
-
- // mmap memory maps a DB's data file.
- func mmap(db *DB, sz int) error {
-+ // Truncate and fsync to ensure file size metadata is flushed.
-+ // https://github.com/boltdb/bolt/issues/284
-+ if !db.NoGrowSync && !db.readOnly {
-+ if err := db.file.Truncate(int64(sz)); err != nil {
-+ return fmt.Errorf("file resize error: %s", err)
-+ }
-+ if err := db.file.Sync(); err != nil {
-+ return fmt.Errorf("file sync error: %s", err)
-+ }
-+ }
-+
-+ // Map the data file to memory.
- b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
- if err != nil {
- return err
- }
-
-+ // Advise the kernel that the mmap is accessed randomly.
-+ if err := madvise(b, syscall.MADV_RANDOM); err != nil {
-+ return fmt.Errorf("madvise: %s", err)
-+ }
-+
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
-@@ -67,3 +89,12 @@ func munmap(db *DB) error {
- db.datasz = 0
- return err
- }
-+
-+// NOTE: This function is copied from stdlib because it is not available on darwin.
-+func madvise(b []byte, advice int) (err error) {
-+ _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
-+ if e1 != 0 {
-+ err = e1
-+ }
-+ return
-+}
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
-new file mode 100644
-index 0000000..f480ee7
---- /dev/null
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
-@@ -0,0 +1,101 @@
-+package bolt
-+
-+import (
-+ "fmt"
-+ "os"
-+ "syscall"
-+ "time"
-+ "unsafe"
-+ "golang.org/x/sys/unix"
-+)
-+
-+// flock acquires an advisory lock on a file descriptor.
-+func flock(f *os.File, exclusive bool, timeout time.Duration) error {
-+ var t time.Time
-+ for {
-+ // If we're beyond our timeout then return an error.
-+ // This can only occur after we've attempted a flock once.
-+ if t.IsZero() {
-+ t = time.Now()
-+ } else if timeout > 0 && time.Since(t) > timeout {
-+ return ErrTimeout
-+ }
-+ var lock syscall.Flock_t
-+ lock.Start = 0
-+ lock.Len = 0
-+ lock.Pid = 0
-+ lock.Whence = 0
-+ if exclusive {
-+ lock.Type = syscall.F_WRLCK
-+ } else {
-+ lock.Type = syscall.F_RDLCK
-+ }
-+ err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
-+ if err == nil {
-+ return nil
-+ } else if err != syscall.EAGAIN {
-+ return err
-+ }
-+
-+ // Wait for a bit and try again.
-+ time.Sleep(50 * time.Millisecond)
-+ }
-+}
-+
-+// funlock releases an advisory lock on a file descriptor.
-+func funlock(f *os.File) error {
-+ var lock syscall.Flock_t
-+ lock.Start = 0
-+ lock.Len = 0
-+ lock.Type = syscall.F_UNLCK
-+ lock.Whence = 0
-+ return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock)
-+}
-+
-+// mmap memory maps a DB's data file.
-+func mmap(db *DB, sz int) error {
-+ // Truncate and fsync to ensure file size metadata is flushed.
-+ // https://github.com/boltdb/bolt/issues/284
-+ if !db.NoGrowSync && !db.readOnly {
-+ if err := db.file.Truncate(int64(sz)); err != nil {
-+ return fmt.Errorf("file resize error: %s", err)
-+ }
-+ if err := db.file.Sync(); err != nil {
-+ return fmt.Errorf("file sync error: %s", err)
-+ }
-+ }
-+
-+ // Map the data file to memory.
-+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
-+ if err != nil {
-+ return err
-+ }
-+
-+ // Advise the kernel that the mmap is accessed randomly.
-+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
-+ return fmt.Errorf("madvise: %s", err)
-+ }
-+
-+ // Save the original byte slice and convert to a byte array pointer.
-+ db.dataref = b
-+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
-+ db.datasz = sz
-+ return nil
-+}
-+
-+// munmap unmaps a DB's data file from memory.
-+func munmap(db *DB) error {
-+ // Ignore the unmap if we have no mapped data.
-+ if db.dataref == nil {
-+ return nil
-+ }
-+
-+ // Unmap using the original byte slice.
-+ err := unix.Munmap(db.dataref)
-+ db.dataref = nil
-+ db.data = nil
-+ db.datasz = 0
-+ return err
-+}
-diff --git a/vendor/src/github.com/boltdb/bolt/bolt_windows.go b/vendor/src/github.com/boltdb/bolt/bolt_windows.go
-index c8539d4..8b782be 100644
---- a/vendor/src/github.com/boltdb/bolt/bolt_windows.go
-+++ b/vendor/src/github.com/boltdb/bolt/bolt_windows.go
-@@ -16,7 +16,7 @@ func fdatasync(db *DB) error {
- }
-
- // flock acquires an advisory lock on a file descriptor.
--func flock(f *os.File, _ time.Duration) error {
-+func flock(f *os.File, _ bool, _ time.Duration) error {
- return nil
- }
-
-@@ -28,9 +28,11 @@ func funlock(f *os.File) error {
- // mmap memory maps a DB's data file.
- // Based on: https://github.com/edsrzf/mmap-go
- func mmap(db *DB, sz int) error {
-- // Truncate the database to the size of the mmap.
-- if err := db.file.Truncate(int64(sz)); err != nil {
-- return fmt.Errorf("truncate: %s", err)
-+ if !db.readOnly {
-+ // Truncate the database to the size of the mmap.
-+ if err := db.file.Truncate(int64(sz)); err != nil {
-+ return fmt.Errorf("truncate: %s", err)
-+ }
- }
-
- // Open a file mapping handle.
-diff --git a/vendor/src/github.com/boltdb/bolt/bucket.go b/vendor/src/github.com/boltdb/bolt/bucket.go
-index 2630800..2925288 100644
---- a/vendor/src/github.com/boltdb/bolt/bucket.go
-+++ b/vendor/src/github.com/boltdb/bolt/bucket.go
-@@ -99,6 +99,7 @@ func (b *Bucket) Cursor() *Cursor {
-
- // Bucket retrieves a nested bucket by name.
- // Returns nil if the bucket does not exist.
-+// The bucket instance is only valid for the lifetime of the transaction.
- func (b *Bucket) Bucket(name []byte) *Bucket {
- if b.buckets != nil {
- if child := b.buckets[string(name)]; child != nil {
-@@ -148,6 +149,7 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
-
- // CreateBucket creates a new bucket at the given key and returns the new bucket.
- // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-+// The bucket instance is only valid for the lifetime of the transaction.
- func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
- if b.tx.db == nil {
- return nil, ErrTxClosed
-@@ -192,6 +194,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
-
- // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
- // Returns an error if the bucket name is blank, or if the bucket name is too long.
-+// The bucket instance is only valid for the lifetime of the transaction.
- func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
- child, err := b.CreateBucket(key)
- if err == ErrBucketExists {
-@@ -252,6 +255,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
-
- // Get retrieves the value for a key in the bucket.
- // Returns a nil value if the key does not exist or if the key is a nested bucket.
-+// The returned value is only valid for the life of the transaction.
- func (b *Bucket) Get(key []byte) []byte {
- k, v, flags := b.Cursor().seek(key)
-
-@@ -332,6 +336,12 @@ func (b *Bucket) NextSequence() (uint64, error) {
- return 0, ErrTxNotWritable
- }
-
-+ // Materialize the root node if it hasn't been already so that the
-+ // bucket will be saved during commit.
-+ if b.rootNode == nil {
-+ _ = b.node(b.root, nil)
-+ }
-+
- // Increment and return the sequence.
- b.bucket.sequence++
- return b.bucket.sequence, nil
-@@ -339,7 +349,8 @@ func (b *Bucket) NextSequence() (uint64, error) {
-
- // ForEach executes a function for each key/value pair in a bucket.
- // If the provided function returns an error then the iteration is stopped and
--// the error is returned to the caller.
-+// the error is returned to the caller. The provided function must not modify
-+// the bucket; this will result in undefined behavior.
- func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
- if b.tx.db == nil {
- return ErrTxClosed
-@@ -511,8 +522,12 @@ func (b *Bucket) spill() error {
- // Update parent node.
- var c = b.Cursor()
- k, _, flags := c.seek([]byte(name))
-- _assert(bytes.Equal([]byte(name), k), "misplaced bucket header: %x -> %x", []byte(name), k)
-- _assert(flags&bucketLeafFlag != 0, "unexpected bucket header flag: %x", flags)
-+ if !bytes.Equal([]byte(name), k) {
-+ panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
-+ }
-+ if flags&bucketLeafFlag == 0 {
-+ panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
-+ }
- c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
- }
-
-@@ -528,7 +543,9 @@ func (b *Bucket) spill() error {
- b.rootNode = b.rootNode.root()
-
- // Update the root node for this bucket.
-- _assert(b.rootNode.pgid < b.tx.meta.pgid, "pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)
-+ if b.rootNode.pgid >= b.tx.meta.pgid {
-+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
-+ }
- b.root = b.rootNode.pgid
-
- return nil
-@@ -659,7 +676,9 @@ func (b *Bucket) pageNode(id pgid) (*page, *node) {
- // Inline buckets have a fake page embedded in their value so treat them
- // differently. We'll return the rootNode (if available) or the fake page.
- if b.root == 0 {
-- _assert(id == 0, "inline bucket non-zero page access(2): %d != 0", id)
-+ if id != 0 {
-+ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
-+ }
- if b.rootNode != nil {
- return nil, b.rootNode
- }
-diff --git a/vendor/src/github.com/boltdb/bolt/cursor.go b/vendor/src/github.com/boltdb/bolt/cursor.go
-index 3bfc2f1..006c548 100644
---- a/vendor/src/github.com/boltdb/bolt/cursor.go
-+++ b/vendor/src/github.com/boltdb/bolt/cursor.go
-@@ -2,6 +2,7 @@ package bolt
-
- import (
- "bytes"
-+ "fmt"
- "sort"
- )
-
-@@ -9,6 +10,8 @@ import (
- // Cursors see nested buckets with value == nil.
- // Cursors can be obtained from a transaction and are valid as long as the transaction is open.
- //
-+// Keys and values returned from the cursor are only valid for the life of the transaction.
-+//
- // Changing data while traversing with a cursor may cause it to be invalidated
- // and return unexpected keys and/or values. You must reposition your cursor
- // after mutating data.
-@@ -24,6 +27,7 @@ func (c *Cursor) Bucket() *Bucket {
-
- // First moves the cursor to the first item in the bucket and returns its key and value.
- // If the bucket is empty then a nil key and value are returned.
-+// The returned key and value are only valid for the life of the transaction.
- func (c *Cursor) First() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
-@@ -40,6 +44,7 @@ func (c *Cursor) First() (key []byte, value []byte) {
-
- // Last moves the cursor to the last item in the bucket and returns its key and value.
- // If the bucket is empty then a nil key and value are returned.
-+// The returned key and value are only valid for the life of the transaction.
- func (c *Cursor) Last() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
-@@ -57,6 +62,7 @@ func (c *Cursor) Last() (key []byte, value []byte) {
-
- // Next moves the cursor to the next item in the bucket and returns its key and value.
- // If the cursor is at the end of the bucket then a nil key and value are returned.
-+// The returned key and value are only valid for the life of the transaction.
- func (c *Cursor) Next() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- k, v, flags := c.next()
-@@ -68,6 +74,7 @@ func (c *Cursor) Next() (key []byte, value []byte) {
-
- // Prev moves the cursor to the previous item in the bucket and returns its key and value.
- // If the cursor is at the beginning of the bucket then a nil key and value are returned.
-+// The returned key and value are only valid for the life of the transaction.
- func (c *Cursor) Prev() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
-
-@@ -99,6 +106,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) {
- // Seek moves the cursor to a given key and returns it.
- // If the key does not exist then the next key is used. If no keys
- // follow, a nil key is returned.
-+// The returned key and value are only valid for the life of the transaction.
- func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
- k, v, flags := c.seek(seek)
-
-@@ -228,8 +236,8 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
- // search recursively performs a binary search against a given page/node until it finds a given key.
- func (c *Cursor) search(key []byte, pgid pgid) {
- p, n := c.bucket.pageNode(pgid)
-- if p != nil {
-- _assert((p.flags&(branchPageFlag|leafPageFlag)) != 0, "invalid page type: %d: %x", p.id, p.flags)
-+ if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
-+ panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
- }
- e := elemRef{page: p, node: n}
- c.stack = append(c.stack, e)
-diff --git a/vendor/src/github.com/boltdb/bolt/db.go b/vendor/src/github.com/boltdb/bolt/db.go
-index 6c45736..d39c4aa 100644
---- a/vendor/src/github.com/boltdb/bolt/db.go
-+++ b/vendor/src/github.com/boltdb/bolt/db.go
-@@ -12,9 +12,6 @@ import (
- "unsafe"
- )
-
--// The smallest size that the mmap can be.
--const minMmapSize = 1 << 22 // 4MB
--
- // The largest step that can be taken when remapping the mmap.
- const maxMmapStep = 1 << 30 // 1GB
-
-@@ -30,6 +27,12 @@ const magic uint32 = 0xED0CDAED
- // must be synchronized using the msync(2) syscall.
- const IgnoreNoSync = runtime.GOOS == "openbsd"
-
-+// Default values if not set in a DB instance.
-+const (
-+ DefaultMaxBatchSize int = 1000
-+ DefaultMaxBatchDelay = 10 * time.Millisecond
-+)
-+
- // DB represents a collection of buckets persisted to a file on disk.
- // All data access is performed through transactions which can be obtained through the DB.
- // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
-@@ -52,9 +55,33 @@ type DB struct {
- // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
- NoSync bool
-
-+ // When true, skips the truncate call when growing the database.
-+ // Setting this to true is only safe on non-ext3/ext4 systems.
-+ // Skipping truncation avoids preallocation of hard drive space and
-+ // bypasses a truncate() and fsync() syscall on remapping.
-+ //
-+ // https://github.com/boltdb/bolt/issues/284
-+ NoGrowSync bool
-+
-+ // MaxBatchSize is the maximum size of a batch. Default value is
-+ // copied from DefaultMaxBatchSize in Open.
-+ //
-+ // If <=0, disables batching.
-+ //
-+ // Do not change concurrently with calls to Batch.
-+ MaxBatchSize int
-+
-+ // MaxBatchDelay is the maximum delay before a batch starts.
-+ // Default value is copied from DefaultMaxBatchDelay in Open.
-+ //
-+ // If <=0, effectively disables batching.
-+ //
-+ // Do not change concurrently with calls to Batch.
-+ MaxBatchDelay time.Duration
-+
- path string
- file *os.File
-- dataref []byte
-+ dataref []byte // mmap'ed readonly, write throws SEGV
- data *[maxMapSize]byte
- datasz int
- meta0 *meta
-@@ -66,6 +93,9 @@ type DB struct {
- freelist *freelist
- stats Stats
-
-+ batchMu sync.Mutex
-+ batch *batch
-+
- rwlock sync.Mutex // Allows only one writer at a time.
- metalock sync.Mutex // Protects meta page access.
- mmaplock sync.RWMutex // Protects mmap access during remapping.
-@@ -74,6 +104,10 @@ type DB struct {
- ops struct {
- writeAt func(b []byte, off int64) (n int, err error)
- }
-+
-+ // Read only mode.
-+ // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
-+ readOnly bool
- }
-
- // Path returns the path to currently open database file.
-@@ -101,20 +135,34 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
- if options == nil {
- options = DefaultOptions
- }
-+ db.NoGrowSync = options.NoGrowSync
-+
-+ // Set default values for later DB operations.
-+ db.MaxBatchSize = DefaultMaxBatchSize
-+ db.MaxBatchDelay = DefaultMaxBatchDelay
-+
-+ flag := os.O_RDWR
-+ if options.ReadOnly {
-+ flag = os.O_RDONLY
-+ db.readOnly = true
-+ }
-
- // Open data file and separate sync handler for metadata writes.
- db.path = path
--
- var err error
-- if db.file, err = os.OpenFile(db.path, os.O_RDWR|os.O_CREATE, mode); err != nil {
-+ if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
- _ = db.close()
- return nil, err
- }
-
-- // Lock file so that other processes using Bolt cannot use the database
-- // at the same time. This would cause corruption since the two processes
-- // would write meta pages and free pages separately.
-- if err := flock(db.file, options.Timeout); err != nil {
-+ // Lock file so that other processes using Bolt in read-write mode cannot
-+ // use the database at the same time. This would cause corruption since
-+ // the two processes would write meta pages and free pages separately.
-+ // The database file is locked exclusively (only one process can grab the lock)
-+ // if !options.ReadOnly.
-+ // The database file is locked using the shared lock (more than one process may
-+ // hold a lock at the same time) otherwise (options.ReadOnly is set).
-+ if err := flock(db.file, !db.readOnly, options.Timeout); err != nil {
- _ = db.close()
- return nil, err
- }
-@@ -162,16 +210,6 @@ func (db *DB) mmap(minsz int) error {
- db.mmaplock.Lock()
- defer db.mmaplock.Unlock()
-
-- // Dereference all mmap references before unmapping.
-- if db.rwtx != nil {
-- db.rwtx.root.dereference()
-- }
--
-- // Unmap existing data before continuing.
-- if err := db.munmap(); err != nil {
-- return err
-- }
--
- info, err := db.file.Stat()
- if err != nil {
- return fmt.Errorf("mmap stat error: %s", err)
-@@ -184,7 +222,20 @@ func (db *DB) mmap(minsz int) error {
- if size < minsz {
- size = minsz
- }
-- size = db.mmapSize(size)
-+ size, err = db.mmapSize(size)
-+ if err != nil {
-+ return err
-+ }
-+
-+ // Dereference all mmap references before unmapping.
-+ if db.rwtx != nil {
-+ db.rwtx.root.dereference()
-+ }
-+
-+ // Unmap existing data before continuing.
-+ if err := db.munmap(); err != nil {
-+ return err
-+ }
-
- // Memory-map the data file as a byte slice.
- if err := mmap(db, size); err != nil {
-@@ -215,22 +266,40 @@ func (db *DB) munmap() error {
- }
-
- // mmapSize determines the appropriate size for the mmap given the current size
--// of the database. The minimum size is 4MB and doubles until it reaches 1GB.
--func (db *DB) mmapSize(size int) int {
-- if size <= minMmapSize {
-- return minMmapSize
-- } else if size < maxMmapStep {
-- size *= 2
-- } else {
-- size += maxMmapStep
-+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-+// Returns an error if the new mmap size is greater than the max allowed.
-+func (db *DB) mmapSize(size int) (int, error) {
-+ // Double the size from 32KB until 1GB.
-+ for i := uint(15); i <= 30; i++ {
-+ if size <= 1<<i {
-+ return 1 << i, nil
-+ }
-+ }
-+
-+ // Verify the requested size is not above the maximum allowed.
-+ if size > maxMapSize {
-+ return 0, fmt.Errorf("mmap too large")
-+ }
-+
-+ // If larger than 1GB then grow by 1GB at a time.
-+ sz := int64(size)
-+ if remainder := sz % int64(maxMmapStep); remainder > 0 {
-+ sz += int64(maxMmapStep) - remainder
- }
-
- // Ensure that the mmap size is a multiple of the page size.
-- if (size % db.pageSize) != 0 {
-- size = ((size / db.pageSize) + 1) * db.pageSize
-+	// This should always be true since we're incrementing in 1GB steps.
-+ pageSize := int64(db.pageSize)
-+ if (sz % pageSize) != 0 {
-+ sz = ((sz / pageSize) + 1) * pageSize
-+ }
-+
-+ // If we've exceeded the max size then only grow up to the max size.
-+ if sz > maxMapSize {
-+ sz = maxMapSize
- }
-
-- return size
-+ return int(sz), nil
- }
-
- // init creates a new database file and initializes its meta pages.
-@@ -250,7 +319,6 @@ func (db *DB) init() error {
- m.magic = magic
- m.version = version
- m.pageSize = uint32(db.pageSize)
-- m.version = version
- m.freelist = 2
- m.root = bucket{root: 3}
- m.pgid = 4
-@@ -283,8 +351,15 @@ func (db *DB) init() error {
- // Close releases all database resources.
- // All transactions must be closed before closing the database.
- func (db *DB) Close() error {
-+ db.rwlock.Lock()
-+ defer db.rwlock.Unlock()
-+
- db.metalock.Lock()
- defer db.metalock.Unlock()
-+
-+ db.mmaplock.RLock()
-+ defer db.mmaplock.RUnlock()
-+
- return db.close()
- }
-
-@@ -304,8 +379,11 @@ func (db *DB) close() error {
-
- // Close file handles.
- if db.file != nil {
-- // Unlock the file.
-- _ = funlock(db.file)
-+ // No need to unlock read-only file.
-+ if !db.readOnly {
-+ // Unlock the file.
-+ _ = funlock(db.file)
-+ }
-
- // Close the file descriptor.
- if err := db.file.Close(); err != nil {
-@@ -323,6 +401,11 @@ func (db *DB) close() error {
- // will cause the calls to block and be serialized until the current write
- // transaction finishes.
- //
-+// Transactions should not be dependent on one another. Opening a read
-+// transaction and a write transaction in the same goroutine can cause the
-+// writer to deadlock because the database periodically needs to re-mmap itself
-+// as it grows and it cannot do that while a read transaction is open.
-+//
- // IMPORTANT: You must close read-only transactions after you are finished or
- // else the database will not reclaim old pages.
- func (db *DB) Begin(writable bool) (*Tx, error) {
-@@ -371,6 +454,11 @@ func (db *DB) beginTx() (*Tx, error) {
- }
-
- func (db *DB) beginRWTx() (*Tx, error) {
-+ // If the database was opened with Options.ReadOnly, return an error.
-+ if db.readOnly {
-+ return nil, ErrDatabaseReadOnly
-+ }
-+
- // Obtain writer lock. This is released by the transaction when it closes.
- // This enforces only one writer transaction at a time.
- db.rwlock.Lock()
-@@ -501,6 +589,12 @@ func (db *DB) View(fn func(*Tx) error) error {
- return nil
- }
-
-+// Sync executes fdatasync() against the database file handle.
-+//
-+// This is not necessary under normal operation; however, if you use NoSync
-+// then it allows you to force the database file to sync against the disk.
-+func (db *DB) Sync() error { return fdatasync(db) }
-+
- // Stats retrieves ongoing performance stats for the database.
- // This is only updated when a transaction closes.
- func (db *DB) Stats() Stats {
-@@ -561,18 +655,30 @@ func (db *DB) allocate(count int) (*page, error) {
- return p, nil
- }
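-+// IsReadOnly reports whether the database was opened with Options.ReadOnly.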
-
-+func (db *DB) IsReadOnly() bool {
-+ return db.readOnly
-+}
-+
- // Options represents the options that can be set when opening a database.
- type Options struct {
- // Timeout is the amount of time to wait to obtain a file lock.
- // When set to zero it will wait indefinitely. This option is only
- // available on Darwin and Linux.
- Timeout time.Duration
-+
-+ // Sets the DB.NoGrowSync flag before memory mapping the file.
-+ NoGrowSync bool
-+
-+	// Open database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to
-+ // grab a shared lock (UNIX).
-+ ReadOnly bool
- }
-
- // DefaultOptions represent the options used if nil options are passed into Open().
- // No timeout is used which will cause Bolt to wait indefinitely for a lock.
- var DefaultOptions = &Options{
-- Timeout: 0,
-+ Timeout: 0,
-+ NoGrowSync: false,
- }
-
- // Stats represents statistics about the database.
-@@ -647,9 +753,11 @@ func (m *meta) copy(dest *meta) {
-
- // write writes the meta onto a page.
- func (m *meta) write(p *page) {
--
-- _assert(m.root.root < m.pgid, "root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)
-- _assert(m.freelist < m.pgid, "freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)
-+ if m.root.root >= m.pgid {
-+ panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
-+ } else if m.freelist >= m.pgid {
-+ panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
-+ }
-
- // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
- p.id = pgid(m.txid % 2)
-@@ -675,13 +783,8 @@ func _assert(condition bool, msg string, v ...interface{}) {
- }
- }
-
--func warn(v ...interface{}) {
-- fmt.Fprintln(os.Stderr, v...)
--}
--
--func warnf(msg string, v ...interface{}) {
-- fmt.Fprintf(os.Stderr, msg+"\n", v...)
--}
-+func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
-+func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
-
- func printstack() {
- stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
-diff --git a/vendor/src/github.com/boltdb/bolt/errors.go b/vendor/src/github.com/boltdb/bolt/errors.go
-index aa504f1..6883786 100644
---- a/vendor/src/github.com/boltdb/bolt/errors.go
-+++ b/vendor/src/github.com/boltdb/bolt/errors.go
-@@ -36,6 +36,10 @@ var (
- // ErrTxClosed is returned when committing or rolling back a transaction
- // that has already been committed or rolled back.
- ErrTxClosed = errors.New("tx closed")
-+
-+ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
-+ // read-only database.
-+ ErrDatabaseReadOnly = errors.New("database is in read-only mode")
- )
-
- // These errors can occur when putting or deleting a value or a bucket.
-diff --git a/vendor/src/github.com/boltdb/bolt/freelist.go b/vendor/src/github.com/boltdb/bolt/freelist.go
-index 150e3e6..0161948 100644
---- a/vendor/src/github.com/boltdb/bolt/freelist.go
-+++ b/vendor/src/github.com/boltdb/bolt/freelist.go
-@@ -1,6 +1,7 @@
- package bolt
-
- import (
-+ "fmt"
- "sort"
- "unsafe"
- )
-@@ -47,15 +48,14 @@ func (f *freelist) pending_count() int {
-
- // all returns a list of all free ids and all pending ids in one sorted list.
- func (f *freelist) all() []pgid {
-- ids := make([]pgid, len(f.ids))
-- copy(ids, f.ids)
-+ m := make(pgids, 0)
-
- for _, list := range f.pending {
-- ids = append(ids, list...)
-+ m = append(m, list...)
- }
-
-- sort.Sort(pgids(ids))
-- return ids
-+ sort.Sort(m)
-+ return pgids(f.ids).merge(m)
- }
-
- // allocate returns the starting page id of a contiguous list of pages of a given size.
-@@ -67,7 +67,9 @@ func (f *freelist) allocate(n int) pgid {
-
- var initial, previd pgid
- for i, id := range f.ids {
-- _assert(id > 1, "invalid page allocation: %d", id)
-+ if id <= 1 {
-+ panic(fmt.Sprintf("invalid page allocation: %d", id))
-+ }
-
- // Reset initial page if this is not contiguous.
- if previd == 0 || id-previd != 1 {
-@@ -103,13 +105,17 @@ func (f *freelist) allocate(n int) pgid {
- // free releases a page and its overflow for a given transaction id.
- // If the page is already free then a panic will occur.
- func (f *freelist) free(txid txid, p *page) {
-- _assert(p.id > 1, "cannot free page 0 or 1: %d", p.id)
-+ if p.id <= 1 {
-+ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
-+ }
-
- // Free page and all its overflow pages.
- var ids = f.pending[txid]
- for id := p.id; id <= p.id+pgid(p.overflow); id++ {
- // Verify that page is not already free.
-- _assert(!f.cache[id], "page %d already freed", id)
-+ if f.cache[id] {
-+ panic(fmt.Sprintf("page %d already freed", id))
-+ }
-
- // Add to the freelist and cache.
- ids = append(ids, id)
-@@ -120,15 +126,17 @@ func (f *freelist) free(txid txid, p *page) {
-
- // release moves all page ids for a transaction id (or older) to the freelist.
- func (f *freelist) release(txid txid) {
-+ m := make(pgids, 0)
- for tid, ids := range f.pending {
- if tid <= txid {
- // Move transaction's pending pages to the available freelist.
- // Don't remove from the cache since the page is still free.
-- f.ids = append(f.ids, ids...)
-+ m = append(m, ids...)
- delete(f.pending, tid)
- }
- }
-- sort.Sort(pgids(f.ids))
-+ sort.Sort(m)
-+ f.ids = pgids(f.ids).merge(m)
- }
-
- // rollback removes the pages from a given pending tx.
-diff --git a/vendor/src/github.com/boltdb/bolt/node.go b/vendor/src/github.com/boltdb/bolt/node.go
-index c204c39..c9fb21c 100644
---- a/vendor/src/github.com/boltdb/bolt/node.go
-+++ b/vendor/src/github.com/boltdb/bolt/node.go
-@@ -2,6 +2,7 @@ package bolt
-
- import (
- "bytes"
-+ "fmt"
- "sort"
- "unsafe"
- )
-@@ -70,7 +71,9 @@ func (n *node) pageElementSize() int {
-
- // childAt returns the child node at a given index.
- func (n *node) childAt(index int) *node {
-- _assert(!n.isLeaf, "invalid childAt(%d) on a leaf node", index)
-+ if n.isLeaf {
-+ panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
-+ }
- return n.bucket.node(n.inodes[index].pgid, n)
- }
-
-@@ -111,9 +114,13 @@ func (n *node) prevSibling() *node {
-
- // put inserts a key/value.
- func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
-- _assert(pgid < n.bucket.tx.meta.pgid, "pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)
-- _assert(len(oldKey) > 0, "put: zero-length old key")
-- _assert(len(newKey) > 0, "put: zero-length new key")
-+ if pgid >= n.bucket.tx.meta.pgid {
-+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
-+ } else if len(oldKey) <= 0 {
-+ panic("put: zero-length old key")
-+ } else if len(newKey) <= 0 {
-+ panic("put: zero-length new key")
-+ }
-
- // Find insertion index.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
-@@ -189,7 +196,9 @@ func (n *node) write(p *page) {
- p.flags |= branchPageFlag
- }
-
-- _assert(len(n.inodes) < 0xFFFF, "inode overflow: %d (pgid=%d)", len(n.inodes), p.id)
-+ if len(n.inodes) >= 0xFFFF {
-+ panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
-+ }
- p.count = uint16(len(n.inodes))
-
- // Loop over each item and write it to the page.
-@@ -212,11 +221,20 @@ func (n *node) write(p *page) {
- _assert(elem.pgid != p.id, "write: circular dependency occurred")
- }
-
-+ // If the length of key+value is larger than the max allocation size
-+ // then we need to reallocate the byte array pointer.
-+ //
-+ // See: https://github.com/boltdb/bolt/pull/335
-+ klen, vlen := len(item.key), len(item.value)
-+ if len(b) < klen+vlen {
-+ b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
-+ }
-+
- // Write data for the element to the end of the page.
- copy(b[0:], item.key)
-- b = b[len(item.key):]
-+ b = b[klen:]
- copy(b[0:], item.value)
-- b = b[len(item.value):]
-+ b = b[vlen:]
- }
-
- // DEBUG ONLY: n.dump()
-@@ -348,7 +366,9 @@ func (n *node) spill() error {
- }
-
- // Write the node.
-- _assert(p.id < tx.meta.pgid, "pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)
-+ if p.id >= tx.meta.pgid {
-+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
-+ }
- node.pgid = p.id
- node.write(p)
- node.spilled = true
-diff --git a/vendor/src/github.com/boltdb/bolt/page.go b/vendor/src/github.com/boltdb/bolt/page.go
-index b3dc473..818aa1b 100644
---- a/vendor/src/github.com/boltdb/bolt/page.go
-+++ b/vendor/src/github.com/boltdb/bolt/page.go
-@@ -3,12 +3,12 @@ package bolt
- import (
- "fmt"
- "os"
-+ "sort"
- "unsafe"
- )
-
- const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
-
--const maxAllocSize = 0xFFFFFFF
- const minKeysPerPage = 2
-
- const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
-@@ -97,7 +97,7 @@ type branchPageElement struct {
- // key returns a byte slice of the node key.
- func (n *branchPageElement) key() []byte {
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-- return buf[n.pos : n.pos+n.ksize]
-+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
- }
-
- // leafPageElement represents a node on a leaf page.
-@@ -111,13 +111,13 @@ type leafPageElement struct {
- // key returns a byte slice of the node key.
- func (n *leafPageElement) key() []byte {
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-- return buf[n.pos : n.pos+n.ksize]
-+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
- }
-
- // value returns a byte slice of the node value.
- func (n *leafPageElement) value() []byte {
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-- return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]
-+ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize]
- }
-
- // PageInfo represents human readable information about a page.
-@@ -133,3 +133,40 @@ type pgids []pgid
- func (s pgids) Len() int { return len(s) }
- func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
- func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
-+
-+// merge returns the sorted union of a and b.
-+func (a pgids) merge(b pgids) pgids {
-+ // Return the opposite slice if one is nil.
-+ if len(a) == 0 {
-+ return b
-+ } else if len(b) == 0 {
-+	// Return the other slice if one is empty.
-+ }
-+
-+ // Create a list to hold all elements from both lists.
-+ merged := make(pgids, 0, len(a)+len(b))
-+
-+ // Assign lead to the slice with a lower starting value, follow to the higher value.
-+ lead, follow := a, b
-+ if b[0] < a[0] {
-+ lead, follow = b, a
-+ }
-+
-+ // Continue while there are elements in the lead.
-+ for len(lead) > 0 {
-+ // Merge largest prefix of lead that is ahead of follow[0].
-+ n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
-+ merged = append(merged, lead[:n]...)
-+ if n >= len(lead) {
-+ break
-+ }
-+
-+ // Swap lead and follow.
-+ lead, follow = follow, lead[n:]
-+ }
-+
-+ // Append what's left in follow.
-+ merged = append(merged, follow...)
-+
-+ return merged
-+}
-diff --git a/vendor/src/github.com/boltdb/bolt/tx.go b/vendor/src/github.com/boltdb/bolt/tx.go
-index c041d73..fe6c287 100644
---- a/vendor/src/github.com/boltdb/bolt/tx.go
-+++ b/vendor/src/github.com/boltdb/bolt/tx.go
-@@ -87,18 +87,21 @@ func (tx *Tx) Stats() TxStats {
-
- // Bucket retrieves a bucket by name.
- // Returns nil if the bucket does not exist.
-+// The bucket instance is only valid for the lifetime of the transaction.
- func (tx *Tx) Bucket(name []byte) *Bucket {
- return tx.root.Bucket(name)
- }
-
- // CreateBucket creates a new bucket.
- // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-+// The bucket instance is only valid for the lifetime of the transaction.
- func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
- return tx.root.CreateBucket(name)
- }
-
- // CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
- // Returns an error if the bucket name is blank, or if the bucket name is too long.
-+// The bucket instance is only valid for the lifetime of the transaction.
- func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
- return tx.root.CreateBucketIfNotExists(name)
- }
-@@ -127,7 +130,8 @@ func (tx *Tx) OnCommit(fn func()) {
- }
-
- // Commit writes all changes to disk and updates the meta page.
--// Returns an error if a disk write error occurs.
-+// Returns an error if a disk write error occurs, or if Commit is
-+// called on a read-only transaction.
- func (tx *Tx) Commit() error {
- _assert(!tx.managed, "managed tx commit not allowed")
- if tx.db == nil {
-@@ -203,7 +207,8 @@ func (tx *Tx) Commit() error {
- return nil
- }
-
--// Rollback closes the transaction and ignores all previous updates.
-+// Rollback closes the transaction and ignores all previous updates. Read-only
-+// transactions must be rolled back and not committed.
- func (tx *Tx) Rollback() error {
- _assert(!tx.managed, "managed tx rollback not allowed")
- if tx.db == nil {
-@@ -234,7 +239,8 @@ func (tx *Tx) close() {
- var freelistPendingN = tx.db.freelist.pending_count()
- var freelistAlloc = tx.db.freelist.size()
-
-- // Remove writer lock.
-+ // Remove transaction ref & writer lock.
-+ tx.db.rwtx = nil
- tx.db.rwlock.Unlock()
-
- // Merge statistics.
-@@ -248,41 +254,51 @@ func (tx *Tx) close() {
- } else {
- tx.db.removeTx(tx)
- }
-+
-+ // Clear all references.
- tx.db = nil
-+ tx.meta = nil
-+ tx.root = Bucket{tx: tx}
-+ tx.pages = nil
- }
-
- // Copy writes the entire database to a writer.
--// A reader transaction is maintained during the copy so it is safe to continue
--// using the database while a copy is in progress.
--// Copy will write exactly tx.Size() bytes into the writer.
-+// This function exists for backwards compatibility. Use WriteTo() instead.
- func (tx *Tx) Copy(w io.Writer) error {
-- var f *os.File
-- var err error
-+ _, err := tx.WriteTo(w)
-+ return err
-+}
-
-+// WriteTo writes the entire database to a writer.
-+// If err == nil then exactly tx.Size() bytes will be written into the writer.
-+func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
- // Attempt to open reader directly.
-+ var f *os.File
- if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil {
- // Fallback to a regular open if that doesn't work.
- if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil {
-- return err
-+ return 0, err
- }
- }
-
- // Copy the meta pages.
- tx.db.metalock.Lock()
-- _, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
-+ n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
- tx.db.metalock.Unlock()
- if err != nil {
- _ = f.Close()
-- return fmt.Errorf("meta copy: %s", err)
-+ return n, fmt.Errorf("meta copy: %s", err)
- }
-
- // Copy data pages.
-- if _, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)); err != nil {
-+ wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
-+ n += wn
-+ if err != nil {
- _ = f.Close()
-- return err
-+ return n, err
- }
-
-- return f.Close()
-+ return n, f.Close()
- }
-
- // CopyFile copies the entire database to file at the given path.
-@@ -416,15 +432,39 @@ func (tx *Tx) write() error {
- // Write pages to disk in order.
- for _, p := range pages {
- size := (int(p.overflow) + 1) * tx.db.pageSize
-- buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:size]
- offset := int64(p.id) * int64(tx.db.pageSize)
-- if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
-- return err
-- }
-
-- // Update statistics.
-- tx.stats.Write++
-+ // Write out page in "max allocation" sized chunks.
-+ ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
-+ for {
-+ // Limit our write to our max allocation size.
-+ sz := size
-+ if sz > maxAllocSize-1 {
-+ sz = maxAllocSize - 1
-+ }
-+
-+ // Write chunk to disk.
-+ buf := ptr[:sz]
-+ if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
-+ return err
-+ }
-+
-+ // Update statistics.
-+ tx.stats.Write++
-+
-+ // Exit inner for loop if we've written all the chunks.
-+ size -= sz
-+ if size == 0 {
-+ break
-+ }
-+
-+ // Otherwise move offset forward and move pointer to next chunk.
-+ offset += int64(sz)
-+ ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
-+ }
- }
-+
-+ // Ignore file sync if flag is set on DB.
- if !tx.db.NoSync || IgnoreNoSync {
- if err := fdatasync(tx.db); err != nil {
- return err
---
-1.9.1
-
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch b/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch
deleted file mode 100644
index d37d7a060..000000000
--- a/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From 12fd6388a033ab5ec9b3a7b144c4976031e6aa52 Mon Sep 17 00:00:00 2001
-From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
-Date: Fri, 20 Nov 2015 10:02:09 +0000
-Subject: [PATCH] disable sha1sum startup
-
-Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
----
- utils/utils.go | 18 +-----------------
- 1 file changed, 1 insertion(+), 17 deletions(-)
-
-diff --git a/utils/utils.go b/utils/utils.go
-index a17ab9d..3fc514a 100644
---- a/utils/utils.go
-+++ b/utils/utils.go
-@@ -2,8 +2,6 @@ package utils
-
- import (
- "bufio"
-- "crypto/sha1"
-- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
-@@ -42,20 +40,6 @@ func SelfPath() string {
- return path
- }
-
--func dockerInitSha1(target string) string {
-- f, err := os.Open(target)
-- if err != nil {
-- return ""
-- }
-- defer f.Close()
-- h := sha1.New()
-- _, err = io.Copy(h, f)
-- if err != nil {
-- return ""
-- }
-- return hex.EncodeToString(h.Sum(nil))
--}
--
- func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
- if target == "" {
- return false
-@@ -77,7 +61,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and
- }
- return os.SameFile(targetFileInfo, selfPathFileInfo)
- }
-- return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1
-+ return true
- }
-
- // DockerInitPath figures out the path of our dockerinit (which may be SelfPath())
---
-1.9.1
-
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service
index 680103190..eaa3319f4 100644
--- a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service
@@ -5,7 +5,7 @@ After=network.target docker.socket
Requires=docker.socket
[Service]
-ExecStart=/usr/bin/docker -d -H fd:// --registry-mirror=http://localhost:5000 --insecure-registry=http://localhost:5000
+ExecStart=/usr/bin/docker daemon -H fd:// --registry-mirror=http://localhost:5000 --insecure-registry=http://localhost:5000 --raw-logs
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
index 34aab38a7..53068a804 100644
--- a/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
@@ -18,6 +18,7 @@ RDEPENDS_${PN} = " \
perl-module-constant \
perl-module-overload \
perl-module-exporter-heavy \
+ glibc-utils \
"
RDEPENDS_${PN}-ptest += "file make"
@@ -38,7 +39,7 @@ S = "${WORKDIR}/${BPN}-${PV}"
# Let's not configure for the host distro.
#
-PTEST_CONF = "${@base_contains('DISTRO_FEATURES', 'ptest', '--enable-tests', '', d)}"
+PTEST_CONF = "${@bb.utils.contains('DISTRO_FEATURES', 'ptest', '--enable-tests', '', d)}"
EXTRA_OECONF += "--with-distro=${DISTRO} ${PTEST_CONF}"
EXTRA_OECONF += "--with-init-script=\
@@ -47,8 +48,13 @@ ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'systemd', '', d)}"
EXTRA_OECONF += "--enable-log-src-basename"
+CFLAGS_append = " -Wno-error=deprecated-declarations"
+
+# disable problematic GCC 5.2 optimizations [YOCTO #8291]
+FULL_OPTIMIZATION_append_arm = " -fno-schedule-insns2"
+
PACKAGECONFIG ??= "templates \
- ${@base_contains('DISTRO_FEATURES', 'selinux', 'selinux', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'selinux', 'selinux', '', d)} \
"
PACKAGECONFIG[doc] = "--enable-doc --enable-api-docs,--disable-doc --disable-api-docs,,"
PACKAGECONFIG[rpath] = "--enable-rpath,--disable-rpath,,"
@@ -109,7 +115,7 @@ do_install_append() {
for i in `grep -l "#! */bin/bash" ${D}${datadir}/lxc/hooks/*`; do \
sed -e 's|#! */bin/bash|#!/bin/sh|' -i $i; done
- if ${@base_contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then
+ if ${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then
install -d ${D}${sysconfdir}/init.d
install -m 755 config/init/sysvinit/lxc* ${D}${sysconfdir}/init.d
fi
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch b/import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch
new file mode 100644
index 000000000..aa57636f7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc/0001-nsexec-fix-build-against-musl-libc.patch
@@ -0,0 +1,48 @@
+From ac6bd953192fa6752a07be7501f69f7cffe33e8e Mon Sep 17 00:00:00 2001
+From: Natanael Copa <natanael.copa@docker.com>
+Date: Tue, 19 Apr 2016 10:43:00 +0200
+Subject: [PATCH] nsexec: fix build against musl libc
+
+Remove a wrongly added include which was added in commit 3c2e77ee (Add a
+compatibility header for CentOS/RHEL 6, 2016-01-29) apparently to
+fix this compile error on centos 6:
+
+> In file included from
+> Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c:20:
+> /usr/include/linux/netlink.h:35: error: expected specifier-qualifier-list before 'sa_family_t'
+
+The glibc bits/sockaddr.h says that this header should never be included
+directly[1]. Instead, sys/socket.h should be used.
+
+The problem was correctly fixed later, in commit 394fb55 (Fix build
+error on centos6, 2016-03-02) so the incorrect bits/sockaddr.h can
+safely be removed.
+
+This is needed to build against musl libc.
+
+Fixes #761
+
+[1]: https://github.molgen.mpg.de/git-mirror/glibc/blob/20003c49884422da7ffbc459cdeee768a6fee07b/bits/sockaddr.h#L20
+
+Signed-off-by: Natanael Copa <natanael.copa@docker.com>
+Signed-off-by: Paul Barker <paul@paulbarker.me.uk>
+Upstream-Status: Backport
+---
+ libcontainer/nsenter/nsexec.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/libcontainer/nsenter/nsexec.c b/libcontainer/nsenter/nsexec.c
+index 8f37d6c..40a8f89 100644
+--- a/libcontainer/nsenter/nsexec.c
++++ b/libcontainer/nsenter/nsexec.c
+@@ -18,7 +18,6 @@
+ #include <unistd.h>
+ #include <grp.h>
+
+-#include <bits/sockaddr.h>
+ #include <linux/types.h>
+
+ // All arguments should be above the stack because it grows down
+--
+2.1.4
+
diff --git a/import-layers/meta-virtualization/recipes-containers/runc/runc_git.bb b/import-layers/meta-virtualization/recipes-containers/runc/runc_git.bb
new file mode 100644
index 000000000..905a751b8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/runc/runc_git.bb
@@ -0,0 +1,66 @@
+HOMEPAGE = "https://github.com/opencontainers/runc"
+SUMMARY = "runc container cli tools"
+DESCRIPTION = "runc is a CLI tool for spawning and running containers according to the OCI specification."
+
+# Note: this rev predates the required protocol field; update it once all
+# components have been updated to match.
+SRCREV = "1cdaa709f151b61cee2bdaa09d8e5d2b58a8ba72"
+SRC_URI = "\
+ git://github.com/opencontainers/runc;branch=master \
+ "
+
+# Apache-2.0 for runc
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=435b266b3899aa8a959f17d41c56def8"
+
+S = "${WORKDIR}/git"
+
+RUNC_VERSION = "1.0.0-rc1"
+PV = "${RUNC_VERSION}+git${SRCREV}"
+
+DEPENDS = "go-cross \
+ "
+RRECOMMENDS_${PN} = "lxc docker"
+
+LIBCONTAINER_PACKAGE="github.com/opencontainers/runc/libcontainer"
+
+do_configure[noexec] = "1"
+EXTRA_OEMAKE="BUILDTAGS=''"
+
+inherit go-osarchmap
+
+do_compile() {
+ export GOARCH="${TARGET_GOARCH}"
+
+ # Set GOPATH. See 'PACKAGERS.md'. Don't rely on
+ # docker to download its dependencies but rather
+ # use dependencies packaged independently.
+ cd ${S}
+ rm -rf .gopath
+ dname=`dirname "${LIBCONTAINER_PACKAGE}"`
+ bname=`basename "${LIBCONTAINER_PACKAGE}"`
+ mkdir -p .gopath/src/${dname}
+
+ (cd .gopath/src/${dname}; ln -sf ../../../../../${bname} ${bname})
+ export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ cd -
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed headers files and libraries
+ export CGO_ENABLED="1"
+ export CGO_CFLAGS="${CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CFLAGS=""
+ export LDFLAGS=""
+
+ oe_runmake static
+}
+
+do_install() {
+ mkdir -p ${D}/${bindir}
+
+ cp ${S}/runc ${D}/${bindir}/runc
+ ln -sf runc ${D}/${bindir}/docker-runc
+}
+
+INHIBIT_PACKAGE_STRIP = "1"
diff --git a/import-layers/meta-virtualization/recipes-core/base-files/base-files_3.%.bbappend b/import-layers/meta-virtualization/recipes-core/base-files/base-files_3.%.bbappend
deleted file mode 100644
index eb973adfd..000000000
--- a/import-layers/meta-virtualization/recipes-core/base-files/base-files_3.%.bbappend
+++ /dev/null
@@ -1,5 +0,0 @@
-do_install_append() {
- if echo "${DISTRO_FEATURES}" | grep -q 'xen'; then
- echo "xenfs /proc/xen xenfs defaults 0 0" >> ${D}${sysconfdir}/fstab
- fi
-}
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc
index 613e9c7c0..317498784 100644
--- a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc
@@ -1,6 +1,11 @@
inherit cross
-DEPENDS += "go-native"
+# libgcc is required for the target specific libraries to build properly
+DEPENDS += "go-native libgcc"
+
+# Prevent runstrip from running because you get errors when the host arch != target arch
+#INHIBIT_PACKAGE_STRIP = "1"
+STRIP = "echo"
export GOHOSTOS = "${BUILD_GOOS}"
export GOHOSTARCH = "${BUILD_GOARCH}"
@@ -26,6 +31,11 @@ do_compile() {
cd src
./make.bash --host-only
+ # Ensure cgo.a is built with the target toolchain
+ export GOBIN="${B}/target/bin"
+ rm -rf ${GOBIN}
+ mkdir -p ${GOBIN}
+ GO_FLAGS="-a" ./make.bash
}
do_install() {
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc
index 8b4be9ec7..cb2dd2a7c 100644
--- a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc
@@ -16,7 +16,7 @@ do_compile() {
mkdir -p ${WORKDIR}/build-tmp
cd src
- ./make.bash --host-only
+ CGO_ENABLED=0 ./make.bash --host-only
}
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb
index 31d724ce3..2a803da52 100644
--- a/import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb
@@ -13,15 +13,13 @@ SRCREV = "d957768537c5af40e4f4cd96871f7b2bde9e2923"
S = "${WORKDIR}/git"
-do_unpackpost() {
- rm -rf ${S}/[A-KM-Za-ce-z]* ${S}/doc*
+# No-op the do_compile task because this recipe is source-only.
+do_compile() {
}
-addtask unpackpost after do_unpack before do_patch
-
do_install() {
install -d ${D}${prefix}/local/go/src/${PKG_NAME}
- cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+ cp -r ${S}/LICENSE ${S}/digest ${D}${prefix}/local/go/src/${PKG_NAME}/
}
SYSROOT_PREPROCESS_FUNCS += "go_distribution_digeset_sysroot_preprocess"
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/disable_tests.patch
index dac89421a..f5e71ca62 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch
+++ b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/disable_tests.patch
@@ -11,9 +11,9 @@ diff -Naur protobuf-c-0.15.old/src/Makefile.am protobuf-c-0.15/src/Makefile.am
@@ -23,7 +23,7 @@
lib_LTLIBRARIES = libprotobuf-c.la
protobufcincludedir = $(includedir)/google/protobuf-c
-
+
-EXTRA_DIST = CMakeLists.txt test/CMakeLists.txt
-+EXTRA_DIST = CMakeLists.txt
-
++EXTRA_DIST = CMakeLists.txt
+
libprotobuf_c_la_SOURCES = \
google/protobuf-c/protobuf-c-dispatch.c \
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/protobuf-allow-running-python-scripts-from-anywhere.patch
index 13d4e8485..8b2934284 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch
+++ b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/protobuf-allow-running-python-scripts-from-anywhere.patch
@@ -25,7 +25,7 @@ index 8dc9083..a993d63 100644
+ @echo 'SCRIPT_DIR=$$(dirname $$0)' >> add_person_python
+ @echo '$$SCRIPT_DIR/add_person.py "$$@"' >> add_person_python
@chmod +x add_person_python
-
+
list_people_python: list_people.py protoc_middleman
@echo "Writing shortcut script list_people_python..."
@echo '#! /bin/sh' > list_people_python
@@ -33,6 +33,6 @@ index 8dc9083..a993d63 100644
+ @echo 'SCRIPT_DIR=$$(dirname $$0)' >> list_people_python
+ @echo '$$SCRIPT_DIR/list_people.py "$$@"' >> list_people_python
@chmod +x list_people_python
---
+--
1.9.3
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/run-ptest
index a5a7b0f9b..a5a7b0f9b 100755
--- a/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest
+++ b/import-layers/meta-virtualization/recipes-devtools/protobuf/files/run-ptest
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-c_1.2.1.bb
index 0d03ebef8..ff2499e50 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-c_1.2.1.bb
@@ -4,16 +4,16 @@ HOMEPAGE = "http://code.google.com/p/protobuf-c/"
SECTION = "console/tools"
LICENSE = "Apache-2.0"
-LIC_FILES_CHKSUM = "file://protobuf-c/protobuf-c.c;endline=28;md5=0feb44cc63eacef97219b0174967492f"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=235c3195a3968524dc1524b4ebea0c0e"
COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
DEPENDS = "protobuf protobuf-c-native"
-SRC_URI[md5sum] = "41d437677ea16f9d3611d98841c4af3b"
-SRC_URI[sha256sum] = "09c5bb187b7a8e86bc0ff860f7df86370be9e8661cdb99c1072dcdab0763562c"
-SRC_URI = "https://github.com/protobuf-c/protobuf-c/releases/download/v1.1.1/protobuf-c-1.1.1.tar.gz "
-SRC_URI_append_class-target ="file://0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch"
+SRC_URI[md5sum] = "e544249c329391fff512c3874895cfbe"
+SRC_URI[sha256sum] = "846eb4846f19598affdc349d817a8c4c0c68fd940303e6934725c889f16f00bd"
+SRC_URI = "https://github.com/protobuf-c/protobuf-c/releases/download/v1.2.1/protobuf-c-1.2.1.tar.gz "
+#SRC_URI_append_class-target ="file://0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch"
inherit autotools pkgconfig
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-native_3.0.0.bb
index e88c9e78b..33467b360 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf-native_3.0.0.bb
@@ -6,13 +6,13 @@ HOMEPAGE = "http://code.google.com/p/protobuf/"
SECTION = "console/tools"
LICENSE = "BSD-3-Clause"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=af6809583bfde9a31595a58bb4a24514"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=35953c752efc9299b184f91bef540095"
PR = "r0"
-SRC_URI[md5sum] = "af05b2cca289f7b86eef2734a0cdc8b9"
-SRC_URI[sha256sum] = "2667b7cda4a6bc8a09e5463adf3b5984e08d94e72338277affa8594d8b6e5cd1"
-SRC_URI = "https://github.com/google/protobuf/archive/v2.6.1.tar.gz;downloadfilename=protobuf-2.6.1.tar.gz \
+SRC_URI[md5sum] = "d4f6ca65aadc6310b3872ee421e79fa6"
+SRC_URI[sha256sum] = "f5b3563f118f1d3d6e001705fa7082e8fc3bda50038ac3dff787650795734146"
+SRC_URI = "https://github.com/google/protobuf/archive/v3.0.0.tar.gz;downloadfilename=protobuf-3.0.0.tar.gz \
"
EXTRA_OECONF += " --with-protoc=echo --disable-shared"
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf_3.0.0.bb
index 1b7ab2011..fd81fe765 100644
--- a/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb
+++ b/import-layers/meta-virtualization/recipes-devtools/protobuf/protobuf_3.0.0.bb
@@ -6,17 +6,17 @@ HOMEPAGE = "http://code.google.com/p/protobuf/"
SECTION = "console/tools"
LICENSE = "BSD-3-Clause"
-LIC_FILES_CHKSUM = "file://LICENSE;md5=af6809583bfde9a31595a58bb4a24514"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=35953c752efc9299b184f91bef540095"
PR = "r0"
EXCLUDE_FROM_WORLD = "1"
-SRC_URI[md5sum] = "af05b2cca289f7b86eef2734a0cdc8b9"
-SRC_URI[sha256sum] = "2667b7cda4a6bc8a09e5463adf3b5984e08d94e72338277affa8594d8b6e5cd1"
-SRC_URI = "https://github.com/google/protobuf/archive/v2.6.1.tar.gz;downloadfilename=protobuf-2.6.1.tar.gz\
- file://protobuf-allow-running-python-scripts-from-anywhere.patch \
- file://Omit-google-apputils-dependency.patch \
- file://run-ptest"
+SRC_URI[md5sum] = "d4f6ca65aadc6310b3872ee421e79fa6"
+SRC_URI[sha256sum] = "f5b3563f118f1d3d6e001705fa7082e8fc3bda50038ac3dff787650795734146"
+SRC_URI = "https://github.com/google/protobuf/archive/v3.0.0.tar.gz;downloadfilename=protobuf-3.0.0.tar.gz\
+ file://protobuf-allow-running-python-scripts-from-anywhere.patch \
+ file://run-ptest \
+ "
COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
@@ -24,6 +24,7 @@ EXTRA_OECONF += " --with-protoc=${STAGING_BINDIR_NATIVE}/protoc"
inherit autotools setuptools ptest
DEPENDS += "protobuf-native"
+RDEPENDS_${PN}-ptest = "bash"
PYTHON_SRC_DIR="python"
TEST_SRC_DIR="examples"
@@ -36,7 +37,7 @@ do_compile() {
do_compile_ptest() {
# Modify makefile to use the cross-compiler
- sed -e "s|c++|${CXX}|g" -i "${S}/${TEST_SRC_DIR}/Makefile"
+ sed -e "s|c++|${CXX} \$(LDFLAGS)|g" -i "${S}/${TEST_SRC_DIR}/Makefile"
mkdir -p "${B}/${TEST_SRC_DIR}"
diff --git a/import-layers/meta-virtualization/recipes-extended/iasl/iasl/Make-CC-definition-conditional.patch b/import-layers/meta-virtualization/recipes-extended/iasl/iasl/Make-CC-definition-conditional.patch
new file mode 100644
index 000000000..f69a36c63
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/iasl/iasl/Make-CC-definition-conditional.patch
@@ -0,0 +1,29 @@
+[PATCH] Make CC definition conditional
+
+Upstream-Status: Pending
+
+Hardcoding CC to gcc makes this package unable to cross-compile. The
+-e option of make cannot override CC because it is not defined in the
+Makefile but in Makefile.config.
+
+Signed-off-by: Roy.Li <rongqing.li@windriver.com>
+---
+ generate/unix/Makefile.config | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/generate/unix/Makefile.config b/generate/unix/Makefile.config
+index 8d41399..72d597d 100644
+--- a/generate/unix/Makefile.config
++++ b/generate/unix/Makefile.config
+@@ -35,7 +35,7 @@
+ .SUFFIXES :
+ PROGS = acpibin acpidump acpiexamples acpiexec acpihelp acpinames acpisrc acpixtract iasl
+ HOST ?= _CYGWIN
+-CC = gcc
++CC ?= gcc
+
+ #
+ # Common defines
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-extended/iasl/iasl/iasl.1 b/import-layers/meta-virtualization/recipes-extended/iasl/iasl/iasl.1
new file mode 100644
index 000000000..000a3b812
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/iasl/iasl/iasl.1
@@ -0,0 +1,135 @@
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH IASL 1 "October 14, 2005"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh disable hyphenation
+.\" .hy enable hyphenation
+.\" .ad l left justify
+.\" .ad b justify to both left and right margins
+.\" .nf disable filling
+.\" .fi enable filling
+.\" .br insert line break
+.\" .sp <n> insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+iasl \- ACPI Source Language compiler/decompiler
+.SH SYNOPSIS
+.B iasl
+.RI [ options ]
+.RI [ "input file" ]
+.SH DESCRIPTION
+This manual page documents briefly the
+.B iasl
+command. The option list is taken from the iasl interactive help.
+.PP
+.\" TeX users may be more comfortable with the \fB<whatever>\fP and
+.\" \fI<whatever>\fP escape sequences to invode bold face and italics,
+.\" respectively.
+.B iasl
+is an ASL compiler and decompiler.
+
+.SH OPTIONS
+
+.PP
+.SS General Output
+.TP
+.B \-p <prefix>
+Specify filename prefix for all output files (including .aml)
+.TP
+.B \-vi
+Less verbose errors and warnings for use with IDEs
+.TP
+.B \-vo
+Enable optimization comments
+.TP
+.B \-vr
+Disable remarks
+.TP
+.B \-vs
+Disable signon
+
+.PP
+.SS AML Output Files
+.TP
+.B \-s<a|c>
+Create AML in assembler or C source file (*.asm or *.c)
+.TP
+.B \-i<a|c>
+Create assembler or C include file (*.inc or *.h)
+.TP
+.B \-t<a|c>
+Create AML in assembler or C hex table (*.hex)
+
+.PP
+.SS AML Code Generation
+.TP
+.B \-oa
+Disable all optimizations (compatibility mode)
+.TP
+.B \-of
+Disable constant folding
+.TP
+.B \-oi
+Disable integer optimization to Zero/One/Ones
+.TP
+.B \-on
+Disable named reference string optimization
+.TP
+.B \-r<Revision>
+Override table header Revision (1-255)
+
+.PP
+.SS Listings
+.TP
+.B \-l
+Create mixed listing file (ASL source and AML) (*.lst)
+.TP
+.B \-ln
+Create namespace file (*.nsp)
+.TP
+.B \-ls
+Create combined source file (expanded includes) (*.src)
+
+.PP
+.SS AML Disassembler
+.TP
+.B \-d [file]
+Disassemble AML to ASL source code file (*.dsl)
+.TP
+.B \-dc [file]
+Disassemble AML and immediately compile it
+.br
+(Obtain DSDT from current system if no input file)
+.TP
+.B \-e
+Generate External() statements for unresolved symbols
+.TP
+.B \-g
+Get ACPI tables and write to files (*.dat)
+
+.PP
+.SS Miscellaneous
+.TP
+.B \-a
+Verify source file is entirely ASCII text (0x00-0x7F)
+
+.PP
+.SS Help
+.TP
+.B \-h
+Additional help and compiler debug options
+.TP
+.B \-hc
+Display operators allowed in constant expressions
+.TP
+.B \-hr
+Display ACPI reserved method names
+
+.SH AUTHOR
+iasl was written by Robert Moore <robert.moore@intel.com>.
+.PP
+This manual page was written by Mattia Dongili <malattia@debian.org>,
+for the Debian project (but may be used by others).
diff --git a/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20120215.bb b/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20120215.bb
deleted file mode 100644
index 5ce093007..000000000
--- a/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20120215.bb
+++ /dev/null
@@ -1,29 +0,0 @@
-DESCRIPTION = "This is a cross development C compiler, assembler and linker environment for the production of 8086 executables (Optionally MSDOS COM)"
-HOMEPAGE = "http://www.acpica.org/"
-LICENSE = "Intel-ACPI"
-LIC_FILES_CHKSUM = "file://asldefine.h;endline=115;md5=d4d7cf809b8b5e03131327b3f718e8f0"
-SECTION = "console/tools"
-PR="r1"
-
-DEPENDS="flex bison"
-
-SRC_URI="https://acpica.org/sites/acpica/files/acpica-unix-${PV}.tar.gz"
-
-SRC_URI[md5sum] = "324c89e5bb9002e2711e0494290ceacc"
-SRC_URI[sha256sum] = "b2b497415f29ddbefe7be8b9429b62c1f1f6e1ec11456928e4e7da86578e5b8d"
-
-S="${WORKDIR}/acpica-unix-${PV}/source/compiler"
-
-NATIVE_INSTALL_WORKS = "1"
-BBCLASSEXTEND = "native"
-
-do_compile() {
- CFLAGS="-Wno-error=redundant-decls" $MAKE
-}
-
-do_install() {
- mkdir -p ${D}${prefix}/bin
- cp ${S}/iasl ${D}${prefix}/bin
-}
-
-
diff --git a/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20160527.bb b/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20160527.bb
new file mode 100644
index 000000000..62e83c477
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20160527.bb
@@ -0,0 +1,36 @@
+SUMMARY = "Intel ASL compiler/decompiler"
+DESCRIPTION = "This is a cross development C compiler, assembler and linker environment for the production of 8086 executables (Optionally MSDOS COM)"
+HOMEPAGE = "http://www.acpica.org/"
+LICENSE = "Intel-ACPI"
+LIC_FILES_CHKSUM = "file://source/compiler/aslcompiler.h;beginline=7;endline=114;md5=09f82edcd148ab4c8aa554bc3e9d0676"
+SECTION = "console/tools"
+
+DEPENDS = "bison-native flex-native"
+
+SRC_URI = "https://www.acpica.org/sites/acpica/files/acpica-unix-${PV}.tar.gz \
+ file://iasl.1 \
+ file://Make-CC-definition-conditional.patch \
+"
+
+SRC_URI[md5sum] = "be677fc358de9dadc036e1ea678a101b"
+SRC_URI[sha256sum] = "6b681732624de1eb58b2bcf1c7ef0744ba14ed35fcaa534d4421574782fbb848"
+
+S = "${WORKDIR}/acpica-unix-${PV}/"
+
+BBCLASSEXTEND = "native"
+CFLAGS += "-D_LINUX -DACPI_ASL_COMPILER -I../include -I../compiler"
+
+COMPATIBLE_HOST = "(x86_64.*|i.86.*)-linux"
+
+# Setting NOOPT suppresses the forced -O2 and _FORTIFY_SOURCE=2, letting the
+# distro's optimization and security CFLAGS control them instead.
+#
+do_compile() {
+ oe_runmake iasl NOOPT=TRUE NOFORTIFY=TRUE
+}
+
+do_install() {
+ install -d ${D}${bindir} ${D}${mandir}/man1
+ install -m 0755 ${S}/generate/unix/bin/iasl ${D}${bindir}
+ install -m 0644 ${WORKDIR}/iasl.1 ${D}${mandir}/man1
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb b/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
index f13940ce4..ab7e92c37 100644
--- a/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
+++ b/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
@@ -7,8 +7,8 @@ IMAGE_INSTALL += " \
${@bb.utils.contains('MACHINE_FEATURES', 'acpi', 'kernel-module-xen-acpi-processor', '', d)} \
"
-IMAGE_INSTALL += "${@base_contains('DISTRO_FEATURES', 'x11', ' xf86-video-fbdev', '', d)}"
-IMAGE_INSTALL += "${@base_contains('DISTRO_FEATURES', 'x11', ' xf86-video-vesa', '', d)}"
+IMAGE_INSTALL += "${@bb.utils.contains('DISTRO_FEATURES', 'x11', ' xf86-video-fbdev', '', d)}"
+IMAGE_INSTALL += "${@bb.utils.contains('DISTRO_FEATURES', 'x11', ' xf86-video-vesa', '', d)}"
LICENSE = "MIT"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb b/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
index 26b6d0654..b8c200220 100644
--- a/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
+++ b/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
@@ -20,6 +20,14 @@ LICENSE = "MIT"
inherit core-image
+do_check_xen_state() {
+ if [ "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' yes', 'no', d)}" = "no" ]; then
+ die "DISTRO_FEATURES does not contain 'xen'"
+ fi
+}
+
+addtask check_xen_state before do_rootfs
+
syslinux_iso_populate_append() {
install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${ISODIR}${ISOLINUXDIR}
install -m 0444 ${STAGING_DATADIR}/syslinux/mboot.c32 ${ISODIR}${ISOLINUXDIR}
diff --git a/import-layers/meta-virtualization/recipes-extended/kvmtool/files/external-crosscompiler.patch b/import-layers/meta-virtualization/recipes-extended/kvmtool/files/external-crosscompiler.patch
new file mode 100644
index 000000000..75cef3b19
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/kvmtool/files/external-crosscompiler.patch
@@ -0,0 +1,31 @@
+Upstream-Status: Inappropriate [embedded specific]
+
+This allows OpenEmbedded to pass in cross compiler configuration using
+the default environment variables. It is required so that kvmtool can
+be linked against the cross-compiled libfdt library.
+
+diff --git a/Makefile b/Makefile
+index 1f0196f..8bfb068 100644
+--- a/Makefile
++++ b/Makefile
+@@ -14,11 +14,6 @@ export E Q
+ include config/utilities.mak
+ include config/feature-tests.mak
+
+-CC := $(CROSS_COMPILE)gcc
+-CFLAGS :=
+-LD := $(CROSS_COMPILE)ld
+-LDFLAGS :=
+-
+ FIND := find
+ CSCOPE := cscope
+ TAGS := ctags
+@@ -297,7 +292,7 @@ $(warning No static libc found. Skipping guest init)
+ endif
+
+ ifeq (y,$(ARCH_WANT_LIBFDT))
+- ifneq ($(call try-build,$(SOURCE_LIBFDT),$(CFLAGS),-lfdt),y)
++ ifneq ($(call try-build,$(SOURCE_LIBFDT),$(CPPFLAGS) $(CFLAGS),-lfdt),y)
+ $(error No libfdt found. Please install libfdt-dev package)
+ else
+ CFLAGS_DYNOPT += -DCONFIG_HAS_LIBFDT
diff --git a/import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb b/import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb
new file mode 100644
index 000000000..3f299dd59
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/kvmtool/kvmtool.bb
@@ -0,0 +1,23 @@
+SUMMARY = "Native Linux KVM tool"
+DESCRIPTION = "kvmtool is a lightweight tool for hosting KVM guests."
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=fcb02dc552a041dee27e4b85c7396067"
+
+DEPENDS = "dtc libaio zlib"
+
+SRC_URI = "git://git.kernel.org/pub/scm/linux/kernel/git/will/kvmtool.git \
+ file://external-crosscompiler.patch \
+ "
+
+SRCREV = "0093df80d754e1a05b016e5a4ccd4b51a00c562c"
+PV = "3.18.0+git${SRCREV}"
+
+S = "${WORKDIR}/git"
+
+EXTRA_OEMAKE='ARCH="${TARGET_ARCH}" V=1'
+
+do_install() {
+ install -d ${D}${bindir}
+ install -m 0755 ${S}/lkvm ${D}${bindir}/
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.5/0001-qemu-Let-empty-default-VNC-password-work-as-document.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.5/0001-qemu-Let-empty-default-VNC-password-work-as-document.patch
new file mode 100644
index 000000000..1d13dd36b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.5/0001-qemu-Let-empty-default-VNC-password-work-as-document.patch
@@ -0,0 +1,81 @@
+Upstream-Status: Backport
+
+Backport patch to fix CVE-2016-5008 from:
+
+https://libvirt.org/git/?p=libvirt.git;a=commit;h=f32441c69bf450d6ac593c3acd621c37e120cdaf
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
+From f32441c69bf450d6ac593c3acd621c37e120cdaf Mon Sep 17 00:00:00 2001
+From: Jiri Denemark <jdenemar@redhat.com>
+Date: Tue, 28 Jun 2016 14:39:58 +0200
+Subject: [PATCH] qemu: Let empty default VNC password work as documented
+
+CVE-2016-5008
+
+Setting an empty graphics password is documented as a way to disable
+VNC/SPICE access, but QEMU does not always behave like that. VNC would
+happily accept the empty password. Let's enforce the behavior by setting
+password expiration to "now".
+
+https://bugzilla.redhat.com/show_bug.cgi?id=1180092
+
+Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
+(cherry picked from commit bb848feec0f3f10e92dd8e5231ae7aa89b5598f3)
+---
+ src/qemu/qemu_hotplug.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
+index 5f12d77..fda28b0 100644
+--- a/src/qemu/qemu_hotplug.c
++++ b/src/qemu/qemu_hotplug.c
+@@ -3547,6 +3547,7 @@ qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver,
+ time_t now = time(NULL);
+ char expire_time [64];
+ const char *connected = NULL;
++ const char *password;
+ int ret = -1;
+ virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+
+@@ -3554,16 +3555,14 @@ qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver,
+ ret = 0;
+ goto cleanup;
+ }
++ password = auth->passwd ? auth->passwd : defaultPasswd;
+
+ if (auth->connected)
+ connected = virDomainGraphicsAuthConnectedTypeToString(auth->connected);
+
+ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
+ goto cleanup;
+- ret = qemuMonitorSetPassword(priv->mon,
+- type,
+- auth->passwd ? auth->passwd : defaultPasswd,
+- connected);
++ ret = qemuMonitorSetPassword(priv->mon, type, password, connected);
+
+ if (ret == -2) {
+ if (type != VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
+@@ -3571,14 +3570,15 @@ qemuDomainChangeGraphicsPasswords(virQEMUDriverPtr driver,
+ _("Graphics password only supported for VNC"));
+ ret = -1;
+ } else {
+- ret = qemuMonitorSetVNCPassword(priv->mon,
+- auth->passwd ? auth->passwd : defaultPasswd);
++ ret = qemuMonitorSetVNCPassword(priv->mon, password);
+ }
+ }
+ if (ret != 0)
+ goto end_job;
+
+- if (auth->expires) {
++ if (password[0] == '\0') {
++ snprintf(expire_time, sizeof(expire_time), "now");
++ } else if (auth->expires) {
+ time_t lifetime = auth->validTo - now;
+ if (lifetime <= 0)
+ snprintf(expire_time, sizeof(expire_time), "now");
+--
+2.9.0
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.2/0001-to-fix-build-error.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.5/0001-to-fix-build-error.patch
index 089ee330e..089ee330e 100644
--- a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.2/0001-to-fix-build-error.patch
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.5/0001-to-fix-build-error.patch
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc
index c1dafe9dc..bc2f1de29 100644
--- a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc
@@ -18,8 +18,8 @@ FILES_${PN}-python = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
SRC_URI += "http://libvirt.org/sources/python/libvirt-python-${PV}.tar.gz;name=libvirt_python"
SRC_URI += "file://libvirt_api_xml_path.patch;patchdir=../libvirt-python-${PV}"
-SRC_URI[libvirt_python.md5sum] = "ed018c714d7ddbe93221c796dff283ed"
-SRC_URI[libvirt_python.sha256sum] = "6d35ae9e7801573393b9c92471f39e6700d479f10b641df81d041b469f160bf8"
+SRC_URI[libvirt_python.md5sum] = "4dbd7ef9ee9c4dea5887b5b31eb04529"
+SRC_URI[libvirt_python.sha256sum] = "a0508a57637fd18a3584fb9d2322fb172f65708c9db16e0438a70eb0f36fa5c2"
export LIBVIRT_API_PATH = "${S}/docs/libvirt-api.xml"
export LIBVIRT_CFLAGS = "-I${S}/include"
@@ -41,14 +41,14 @@ python __anonymous () {
do_compile_append() {
if [ "${LIBVIRT_PYTHON_ENABLE}" = "1" ]; then
- cd ${WORKDIR}/libvirt-python-${PV} && \
+ cd ${WORKDIR}/${BPN}-python-${PV} && \
${STAGING_BINDIR_NATIVE}/python-native/python setup.py build
fi
}
do_install_append() {
if [ "${LIBVIRT_PYTHON_ENABLE}" = "1" ]; then
- cd ${WORKDIR}/${PN}-python-${PV} && \
+ cd ${WORKDIR}/${BPN}-python-${PV} && \
${STAGING_BINDIR_NATIVE}/python-native/python setup.py install \
--install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${LIBVIRT_INSTALL_ARGS}
fi
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-nsslinktest-also-build-virAtomic.h.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-nsslinktest-also-build-virAtomic.h.patch
new file mode 100644
index 000000000..6ab1c4e5c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-nsslinktest-also-build-virAtomic.h.patch
@@ -0,0 +1,40 @@
+Upstream-Status: Submitted [http://www.redhat.com/archives/libvir-list/2016-August/msg00389.html]
+
+When building for an architecture that uses pthreads rather than gcc
+atomic ops, the build fails for arm:
+
+| ../tools/nss/.libs/libnss_libvirt_impl.a(libvirt_nss_la-virobject.o): In function `virClassNew':
+| /buildarea2/kkang/builds/qemuarm-Aug03/bitbake_build/tmp/work/armv5e-wrs-linux-gnueabi/libvirt/1.3.5-r0/build/src/../../libvirt-1.3.5/src/util/virobject.c:153: undefined reference to `virAtomicLock'
+| ../tools/nss/.libs/libnss_libvirt_impl.a(libvirt_nss_la-virobject.o): In function `virObjectNew':
+| /buildarea2/kkang/builds/qemuarm-Aug03/bitbake_build/tmp/work/armv5e-wrs-linux-gnueabi/libvirt/1.3.5-r0/build/src/../../libvirt-1.3.5/src/util/virobject.c:205: undefined reference to `virAtomicLock'
+| ../tools/nss/.libs/libnss_libvirt_impl.a(libvirt_nss_la-virobject.o): In function `virObjectUnref':
+| /buildarea2/kkang/builds/qemuarm-Aug03/bitbake_build/tmp/work/armv5e-wrs-linux-gnueabi/libvirt/1.3.5-r0/build/src/../../libvirt-1.3.5/src/util/virobject.c:277: undefined reference to `virAtomicLock'
+| ../tools/nss/.libs/libnss_libvirt_impl.a(libvirt_nss_la-virobject.o): In function `virObjectRef':
+| /buildarea2/kkang/builds/qemuarm-Aug03/bitbake_build/tmp/work/armv5e-wrs-linux-gnueabi/libvirt/1.3.5-r0/build/src/../../libvirt-1.3.5/src/util/virobject.c:298: undefined reference to `virAtomicLock'
+| collect2: error: ld returned 1 exit status
+
+It is similar to:
+
+http://libvirt.org/git/?p=libvirt.git;a=commit;h=12dc729
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
+ src/Makefile.am | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/src/Makefile.am b/src/Makefile.am
+index 12b66c2..6e55972 100644
+--- a/src/Makefile.am
++++ b/src/Makefile.am
+@@ -2989,6 +2989,8 @@ noinst_LTLIBRARIES += libvirt-nss.la
+ libvirt_nss_la_SOURCES = \
+ util/viralloc.c \
+ util/viralloc.h \
++ util/viratomic.c \
++ util/viratomic.h \
+ util/virbitmap.c \
+ util/virbitmap.h \
+ util/virbuffer.c \
+--
+2.9.0
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-Remove-Windows-1252-check-from-esxutilstest.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-Remove-Windows-1252-check-from-esxutilstest.patch
new file mode 100644
index 000000000..217bdbc5b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-Remove-Windows-1252-check-from-esxutilstest.patch
@@ -0,0 +1,28 @@
+From ffc71da15c3da068f85d16617b6e0c0175fc0110 Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Tue, 23 Aug 2016 02:28:47 -0400
+Subject: [PATCH] ptest: Remove Windows-1252 check from esxutilstest
+
+Currently we use iconv from glibc-locale and it does not support
+Windows-1252 and we don't need support windows character encoding.
+
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+---
+ tests/esxutilstest.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/tests/esxutilstest.c b/tests/esxutilstest.c
+index 44bdc84..3223de3 100644
+--- a/tests/esxutilstest.c
++++ b/tests/esxutilstest.c
+@@ -258,7 +258,6 @@ mymain(void)
+ DO_TEST(ParseDatastorePath);
+ DO_TEST(ConvertDateTimeToCalendarTime);
+ DO_TEST(EscapeDatastoreItem);
+- DO_TEST(ConvertWindows1252ToUTF8);
+
+ return result == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
+ }
+--
+2.8.1
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-add-missing-test_helper-files.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-add-missing-test_helper-files.patch
new file mode 100644
index 000000000..b4f1e2730
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/0001-ptest-add-missing-test_helper-files.patch
@@ -0,0 +1,29 @@
+From e625a42caca492fe7d52b70bbbf83ae4d99cb15e Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Tue, 23 Aug 2016 02:16:20 -0400
+Subject: [PATCH] ptest: add missing test_helper files
+
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+---
+ tests/Makefile.am | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index 1c85656..2f8b9eb 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -1422,8 +1422,10 @@ install-ptest:
+ @(for file in $(PTESTS); do \
+ if [ -f .libs/$$file ]; then \
+ install .libs/$$file $(DEST_DIR)/tests; \
+- else \
++ elif [ -f $(srcdir)/$$file ]; then \
+ install $(srcdir)/$$file $(DEST_DIR)/tests; \
++ else \
++ install $(builddir)/$$file $(DEST_DIR)/tests; \
+ fi; \
+ done;)
+ @(if [ -d .libs ]; then install .libs/*.so $(DEST_DIR)/tests/.libs; fi;)
+--
+2.8.1
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch
index 16c3a16bc..02ddf68eb 100644
--- a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch
@@ -1,3 +1,9 @@
+Upstream-Status: Inappropriate [configuration]
+
+Update context for version 1.3.5.
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
From dd915e7f70e676aea93f750c75d16ce646e71e4b Mon Sep 17 00:00:00 2001
From: Mark Asselstine <mark.asselstine@windriver.com>
Date: Wed, 9 Sep 2015 11:52:44 -0400
@@ -24,20 +30,20 @@ diff --git a/daemon/Makefile.am b/daemon/Makefile.am
index 2dbe81b..41ea2db 100644
--- a/daemon/Makefile.am
+++ b/daemon/Makefile.am
-@@ -445,7 +445,7 @@ endif ! LIBVIRT_INIT_SCRIPT_UPSTART
+@@ -449,7 +449,7 @@ endif ! LIBVIRT_INIT_SCRIPT_UPSTART
if LIBVIRT_INIT_SCRIPT_SYSTEMD
-SYSTEMD_UNIT_DIR = $(prefix)/lib/systemd/system
+SYSTEMD_UNIT_DIR = /lib/systemd/system
- BUILT_SOURCES += libvirtd.service libvirtd.socket
+ BUILT_SOURCES += libvirtd.service
- install-init-systemd: install-sysconfig libvirtd.service libvirtd.socket
+ install-init-systemd: install-sysconfig libvirtd.service
diff --git a/src/Makefile.am b/src/Makefile.am
index a316b4d..d271291 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
-@@ -2594,7 +2594,7 @@ EXTRA_DIST += \
+@@ -2633,7 +2633,7 @@ EXTRA_DIST += \
if WITH_LIBVIRTD
if LIBVIRT_INIT_SCRIPT_SYSTEMD
@@ -50,7 +56,7 @@ diff --git a/tools/Makefile.am b/tools/Makefile.am
index b3227a7..0e58f73 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
-@@ -356,7 +356,7 @@ libvirt-guests.init: libvirt-guests.init.in libvirt-guests.sh
+@@ -380,7 +380,7 @@ libvirt-guests.init: libvirt-guests.init.in libvirt-guests.sh
EXTRA_DIST += libvirt-guests.service.in
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/install-missing-file.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/install-missing-file.patch
new file mode 100644
index 000000000..ecd4a87ba
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/install-missing-file.patch
@@ -0,0 +1,52 @@
+Upstream-Status: Inappropriate
+
+This patch is for ptest, so it is inappropriate to send upstream.
+
+Update context for 1.3.5.
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
+From 0780181a3285511f166487a54ebc231fc657edfe Mon Sep 17 00:00:00 2001
+From: Catalin Enache <catalin.enache@windriver.com>
+Date: Mon, 25 Jul 2016 16:38:51 +0300
+Subject: [PATCH] Install missing conf file
+
+The openvzutilstest.conf file is needed by the openvzutilstest test.
+
+Signed-off-by: Catalin Enache <catalin.enache@windriver.com>
+---
+ tests/Makefile.am | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index 02e0dd8..187fbca 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -170,6 +170,7 @@ EXTRA_DIST = \
+ xml2vmxdata
+
+ test_helpers = commandhelper ssh virconftest
++test_misc =
+ test_programs = virshtest sockettest \
+ nodeinfotest virbuftest \
+ commandtest seclabeltest \
+@@ -259,6 +260,7 @@ endif WITH_LXC
+
+ if WITH_OPENVZ
+ test_programs += openvzutilstest
++test_misc += openvzutilstest.conf
+ endif WITH_OPENVZ
+
+ if WITH_ESX
+@@ -1293,7 +1295,7 @@ endif ! WITH_CIL
+
+ buildtest-TESTS: $(TESTS) $(test_libraries) $(test_helpers)
+
+-PTESTS = $(TESTS) $(test_helpers) test-lib.sh schematestutils.sh
++PTESTS = $(TESTS) $(test_helpers) $(test_misc) test-lib.sh schematestutils.sh
+
+ install-ptest:
+ list='$(TESTS) $(test_helpers) test-lib.sh schematestutils.sh'
+--
+2.7.4
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch
index a33f56950..b7609a81a 100644
--- a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch
@@ -1,14 +1,18 @@
+Update context for 1.3.5.
+
+Signed-off-by: Kai Kang <kai.kang@windriver.com>
+---
Add 'install-ptest' rule.
Change TESTS_ENVIRONMENT to allow running outside build dir.
Signed-off-by: Mihaela Sendrea <mihaela.sendrea@enea.com>
Upstream-status: Pending
-
-Index: libvirt-1.2.4/tests/Makefile.am
-===================================================================
---- libvirt-1.2.4.orig/tests/Makefile.am
-+++ libvirt-1.2.4/tests/Makefile.am
-@@ -31,9 +31,11 @@
+---
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index 0c4ad3c..bb4e31a 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -33,11 +33,13 @@ INCLUDES = \
-I$(top_srcdir)/src/conf \
$(GETTEXT_CPPFLAGS)
@@ -16,13 +20,17 @@ Index: libvirt-1.2.4/tests/Makefile.am
+
AM_CFLAGS = \
- -Dabs_builddir="\"$(abs_builddir)\"" \
+- -Dabs_topbuilddir="\"$(abs_topbuilddir)\"" \
- -Dabs_srcdir="\"$(abs_srcdir)\"" \
+- -Dabs_topsrcdir="\"$(abs_topsrcdir)\"" \
+ -Dabs_builddir="\"$(PTEST_DIR)/tests\"" \
++ -Dabs_topbuilddir="\"$(PTEST_DIR)\"" \
+ -Dabs_srcdir="\"$(PTEST_DIR)/tests\"" \
++ -Dabs_topsrcdir="\"$(PTEST_DIR)\"" \
$(LIBXML_CFLAGS) \
- $(LIBNL_CFLAGS) \
+ $(LIBNL_CFLAGS) \
$(GNUTLS_CFLAGS) \
-@@ -48,7 +50,7 @@
+@@ -62,7 +64,7 @@ QEMULIB_LDFLAGS = \
if WITH_DRIVER_MODULES
INCLUDES += \
@@ -31,15 +39,12 @@ Index: libvirt-1.2.4/tests/Makefile.am
endif WITH_DRIVER_MODULES
PROBES_O =
-@@ -409,20 +411,19 @@
+@@ -483,17 +485,15 @@ TESTS = $(test_programs) \
# Also, BSD sh doesn't like 'a=b b=$$a', so we can't use an
# intermediate shell variable, but must do all the expansion in make
-lv_abs_top_builddir=$(shell cd '$(top_builddir)' && pwd)
- path_add = $(subst :,$(PATH_SEPARATOR),\
-- $(subst !,$(lv_abs_top_builddir)/,!daemon:!tools:!tests))
-+ $(subst !,$(PTEST_DIR)/,!daemon:!tools:!tests))
-
+-
VIR_TEST_EXPENSIVE ?= $(VIR_TEST_EXPENSIVE_DEFAULT)
TESTS_ENVIRONMENT = \
- abs_top_builddir=$(lv_abs_top_builddir) \
@@ -52,14 +57,13 @@ Index: libvirt-1.2.4/tests/Makefile.am
+ abs_builddir="$(PTEST_DIR)/tests" \
+ abs_srcdir="$(PTEST_DIR)/tests" \
+ CONFIG_HEADER="$(PTEST_DIR)/config.h" \
- PATH="$(path_add)$(PATH_SEPARATOR)$$PATH" \
SHELL="$(SHELL)" \
- LIBVIRT_DRIVER_DIR="$(lv_abs_top_builddir)/src/.libs" \
+ LIBVIRT_DRIVER_DIR="$(PTEST_DIR)/src/.libs" \
LIBVIRT_AUTOSTART=0 \
LC_ALL=C \
VIR_TEST_EXPENSIVE=$(VIR_TEST_EXPENSIVE) \
-@@ -1137,5 +1138,51 @@
+@@ -1388,5 +1388,51 @@ else ! WITH_CIL
EXTRA_DIST += objectlocking.ml
endif ! WITH_CIL
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.2.bb b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb
index 42066c1fe..fad563233 100644
--- a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.2.bb
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.5.bb
@@ -1,11 +1,10 @@
DESCRIPTION = "A toolkit to interact with the virtualization capabilities of recent versions of Linux."
HOMEPAGE = "http://libvirt.org"
-LICENSE = "LGPLv2.1+"
-LICENSE_${PN}-ptest = "GPLv2+ & LGPLv2.1"
+LICENSE = "LGPLv2.1+ & GPLv2+"
+LICENSE_${PN}-ptest = "GPLv2+ & LGPLv2.1+"
LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
file://COPYING.LESSER;md5=4b54a1fd55a448865a0b32d41598759d"
SECTION = "console/tools"
-PR = "r1"
DEPENDS = "bridge-utils gnutls libxml2 lvm2 avahi parted curl libpcap util-linux e2fsprogs pm-utils \
iptables dnsmasq readline libtasn1 libxslt-native"
@@ -33,10 +32,15 @@ SRC_URI = "http://libvirt.org/sources/libvirt-${PV}.tar.gz;name=libvirt \
file://libvirt-use-pkg-config-to-locate-libcap.patch \
file://0001-to-fix-build-error.patch \
file://Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch \
+ file://install-missing-file.patch \
+ file://0001-nsslinktest-also-build-virAtomic.h.patch \
+ file://0001-qemu-Let-empty-default-VNC-password-work-as-document.patch \
+ file://0001-ptest-add-missing-test_helper-files.patch \
+ file://0001-ptest-Remove-Windows-1252-check-from-esxutilstest.patch \
"
-SRC_URI[libvirt.md5sum] = "b48b06bbc7efbe9973ed0f3f223d6da2"
-SRC_URI[libvirt.sha256sum] = "e3c6fc2683178660b371efb3ac7a1103a3f4b78efac7ffe560bc5917974ccf05"
+SRC_URI[libvirt.md5sum] = "f9dc1e63d559eca50ae0ee798a4c6c6d"
+SRC_URI[libvirt.sha256sum] = "93a23c44eb431da46c9458f95a66e29c9b98e37515d44b6be09e75b35ec94ac8"
inherit autotools gettext update-rc.d pkgconfig ptest systemd
@@ -107,7 +111,7 @@ FILES_${PN}-libvirtd = " \
/usr/lib/sysctl.d/60-libvirtd.conf \
${sbindir}/libvirtd \
${systemd_unitdir}/system/* \
- ${@base_contains('DISTRO_FEATURES', 'sysvinit', '', '${libexecdir}/libvirt-guests.sh', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', '', '${libexecdir}/libvirt-guests.sh', d)} \
"
FILES_${PN}-virsh = "${bindir}/virsh"
@@ -136,7 +140,6 @@ INITSCRIPT_PARAMS_${PN}-libvirtd = "defaults 72"
SYSTEMD_PACKAGES = "${PN}-libvirtd"
SYSTEMD_SERVICE_${PN}-libvirtd = " \
- libvirtd.socket \
libvirtd.service \
virtlockd.service \
libvirt-guests.service \
@@ -167,9 +170,9 @@ PRIVATE_LIBS_${PN}-ptest = " \
# full config
PACKAGECONFIG ??= "qemu yajl uml openvz vmware vbox esx iproute2 lxc test \
remote macvtap libvirtd netcf udev python ebtables \
- ${@base_contains('DISTRO_FEATURES', 'selinux', 'selinux audit libcap-ng', '', d)} \
- ${@base_contains('DISTRO_FEATURES', 'xen', 'xen libxl xen-inotify', '', d)} \
- ${@base_contains('DISTRO_FEATURES', 'x11', 'polkit', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'selinux', 'selinux audit libcap-ng', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'xen', 'xen libxl xen-inotify', '', d)} \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'polkit', '', d)} \
"
# enable,disable,depends,rdepends
@@ -248,7 +251,7 @@ do_install_append() {
>> ${D}${sysconfdir}/default/volatiles/99_libvirt
# Add hook support for libvirt
- mkdir -p ${D}/etc/libvirt/hooks
+ mkdir -p ${D}/etc/libvirt/hooks
# remove .la references to our working diretory
for i in `find ${D}${libdir} -type f -name *.la`; do
@@ -269,8 +272,10 @@ do_compile_ptest() {
do_install_ptest() {
oe_runmake -C tests install-ptest
+ find ${S}/tests -maxdepth 1 -type d -exec cp -r {} ${D}${PTEST_PATH}/tests/ \;
+
# remove .la files for ptest, they aren't required and can trigger QA errors
- for i in `find ${D}${PTEST_PATH} -type f -name *.la`; do
+ for i in `find ${D}${PTEST_PATH} -type f \( -name *.la -o -name *.o \)`; do
rm -f $i
done
}
diff --git a/import-layers/meta-virtualization/recipes-extended/multipath-tools/multipath-tools_git.bb b/import-layers/meta-virtualization/recipes-extended/multipath-tools/multipath-tools_git.bb
deleted file mode 100644
index 9ba5cd65b..000000000
--- a/import-layers/meta-virtualization/recipes-extended/multipath-tools/multipath-tools_git.bb
+++ /dev/null
@@ -1,46 +0,0 @@
-SUMMARY = "Tools to Manage Multipathed Devices with the device-mapper"
-DESCRIPTION = "This package provides the tools to manage multipathed devices by \
-instructing the device-mapper multipath module what to do"
-
-HOMEPAGE = "http://christophe.varoqui.free.fr/"
-DEPENDS = "readline libaio lvm2 udev"
-LICENSE = "GPLv2"
-
-LIC_FILES_CHKSUM = "file://COPYING;md5=7be2873b6270e45abacc503abbe2aa3d"
-S="${WORKDIR}/git"
-
-
-SRC_URI = "git://git.opensvc.com/multipath-tools/.git;protocol=http"
-
-SRCREV = "d3683ab18b386e9b3b54b59a122c689e9ebdf5cf"
-PV = "0.4.9+gitr${SRCPV}"
-
-inherit autotools-brokensep
-
-EXTRA_OEMAKE="LIB=${libdir} exec_prefix=${exec_prefix} libdir=${libdir}"
-
-PACKAGES =+ "libmpathpersist mpathpersist kpartx libmultipath multipath multipathd libmultipath-dev libmpathpersist-dev"
-
-
-RDEPENDS_${PN} += "libmpathpersist mpathpersist kpartx libmultipath multipath multipathd udev"
-
-do_install_append () {
- ln -sf libmpathpersist.so.0 ${D}${libdir}/libmpathpersist.so
- ln -sf libmultipath.so.0 ${D}${libdir}/libmultipath.so
-}
-
-ALLOW_EMPTY_${PN} = "1"
-FILES_${PN} = ""
-
-FILES_libmpathpersist = "${libdir}/libmpathpersist*.so.0"
-FILES_mpathpersist = "${sbindir}/mpathpersist"
-FILES_kpartx = "${sbindir}/kpartx ${base_libdir}/udev/"
-FILES_libmultipath = "${libdir}/libcheck*.so ${libdir}/libpri*.so ${libdir}/libmultipath*.so.0"
-FILES_multipath = "${sbindir}/multipath ${sysconfdir}"
-FILES_multipathd = "${sbindir}/multipathd ${base_libdir}"
-
-#put the symbol link lib in -dev
-FILES_libmultipath-dev = "${libdir}/libmultipath*.so"
-FILES_libmpathpersist-dev = "${libdir}/libmpathpersist*.so"
-
-
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/README b/import-layers/meta-virtualization/recipes-extended/xen/README
new file mode 100644
index 000000000..3686530cf
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/README
@@ -0,0 +1,34 @@
+Xen
+---
+
+For any issues with the xen recipes, please make sure you CC cardoe@gentoo.org
+
+configuring the hypervisor
+--------------------------
+
+Since 4.7.0, Xen supports using Kconfig to configure the hypervisor. As with
+the busybox recipe, you can provide a .config as a defconfig to override the
+default configuration of the hypervisor. The easiest way to take advantage of
+this is to create a .config for Xen, copy it into your Yocto layer as
+'defconfig' inside 'recipes-extended/xen/files/', and then create a bbappend
+that adds 'file://defconfig' to your SRC_URI (see the example at the end of
+this file).
+
+security patches
+----------------
+
+The base recipe does not include the security fixes that the Xen community
+releases as XSAs (http://xenbits.xen.org/xsa/). The easiest way to include
+them is to drop patches into 'recipes-extended/xen/files' and create a
+bbappend adding those patches to SRC_URI; they will then be applied. You
+can also override SRC_URI to point at a git repo containing the patches.
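+
+As a concrete illustration (a sketch only; 'xsa-NNN.patch' stands in for a
+real XSA patch name), a xen_%.bbappend in your layer covering both of the
+above could contain:
+
+  FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+  SRC_URI += "file://defconfig \
+              file://xsa-NNN.patch \
+             "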
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen.inc b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
index de8c16868..b5c5f030a 100644
--- a/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
@@ -3,7 +3,7 @@ HOMEPAGE = "http://xen.org"
LICENSE = "GPLv2"
SECTION = "console/tools"
-LIC_FILES_CHKSUM = "file://COPYING;md5=a6260c12cd5de27e80d89ae18e48d20a"
+LIC_FILES_CHKSUM = "file://COPYING;md5=bbb4b1bdc2c3b6743da3c39d03249095"
COMPATIBLE_HOST = '(x86_64.*).*-linux|aarch64.*-linux'
@@ -52,6 +52,7 @@ DEPENDS = " \
xz \
yajl \
zlib \
+ gnu-efi \
"
# inherit setuptools adds python to RDEPENDS, override it
@@ -151,6 +152,7 @@ PACKAGES = "\
${PN}-base \
${PN}-blktap \
${PN}-console \
+ ${PN}-cpuid \
${PN}-dbg \
${PN}-dev \
${PN}-devd \
@@ -196,6 +198,7 @@ PACKAGES = "\
${PN}-libxenvchan-dev \
${PN}-libxlutil \
${PN}-libxlutil-dev \
+ ${PN}-livepatch \
${PN}-misc \
${PN}-pygrub \
${PN}-python \
@@ -369,6 +372,10 @@ FILES_${PN}-console = "\
${sbindir}/xenconsoled \
"
+FILES_${PN}-cpuid = "\
+ ${bindir}/xen-cpuid \
+ "
+
FILES_${PN}-devd = "\
${sysconfdir}/init.d/xendriverdomain \
"
@@ -399,6 +406,10 @@ FILES_${PN}-kdd = "\
${sbindir}/kdd \
"
+FILES_${PN}-livepatch += " \
+ ${sbindir}/xen-livepatch \
+ "
+
FILES_${PN}-misc = "\
${bindir}/xencons \
${bindir}/xencov_split \
@@ -527,6 +538,7 @@ FILES_${PN}-remus = "\
"
FILES_${PN}-scripts-network = " \
+ ${sysconfdir}/xen/scripts/colo-proxy-setup \
${sysconfdir}/xen/scripts/network-bridge \
${sysconfdir}/xen/scripts/network-nat \
${sysconfdir}/xen/scripts/network-route \
@@ -544,6 +556,7 @@ FILES_${PN}-scripts-block = " \
${sysconfdir}/xen/scripts/blktap \
${sysconfdir}/xen/scripts/block \
${sysconfdir}/xen/scripts/block-common.sh \
+ ${sysconfdir}/xen/scripts/block-dummy \
${sysconfdir}/xen/scripts/block-enbd \
${sysconfdir}/xen/scripts/block-iscsi \
${sysconfdir}/xen/scripts/block-nbd \
@@ -667,6 +680,7 @@ FILES_${PN}-xm = "\
FILES_${PN}-xencommons += "\
${sysconfdir}/default/xencommons \
${sysconfdir}/init.d/xencommons \
+ ${sysconfdir}/xen/scripts/launch-xenstore \
${systemd_unitdir}/modules-load.d/xen.conf \
${systemd_unitdir}/system/proc-xen.mount \
${systemd_unitdir}/system/xen-qemu-dom0-disk-backend.service \
@@ -744,9 +758,11 @@ export XEN_OS = "Linux"
# this is used for the header (#!${bindir}/python) of the install python scripts
export PYTHONPATH="${bindir}/python"
+export ac_cv_path_PYTHONPATH="${bindir}/python"
-# seabios forcefully sets HOSTCC to CC - fixup to allow it to build native conf executable
+# xen and seabios require HOSTCC and HOSTCXX set to cross-compile
export HOSTCC="${BUILD_CC}"
+export HOSTCXX="${BUILD_CXX}"
# make xen requires CROSS_COMPILE set by hand as it does not abide by ./configure
export CROSS_COMPILE="${TARGET_PREFIX}"
@@ -825,6 +841,13 @@ do_stubs() {
addtask stubs after do_configure before do_compile
+# Allow all hypervisor settings in a defconfig
+EXTRA_OEMAKE += "XEN_CONFIG_EXPERT=y"
+# Build release versions always. Technically since we track release
+# tarballs this always happens but occasionally people pull in patches
+# from staging that reverts this
+EXTRA_OEMAKE += "debug=n"
+
do_configure() {
#./configure --enable-xsmpolicy does not set XSM_ENABLE must be done manually
@@ -832,11 +855,19 @@ do_configure() {
echo "XSM_ENABLE := y" > ${S}/.config
fi
+ if [ -f "${WORKDIR}/defconfig" ]; then
+ cp "${WORKDIR}/defconfig" "${B}/xen/.config" || \
+ bbfatal "Unable to copy defconfig to .config"
+ fi
+
# do configure
oe_runconf
}
do_compile() {
+ # workaround for build bug when CFLAGS is exported
+ # https://www.mail-archive.com/xen-devel@lists.xen.org/msg67822.html
+ unset CFLAGS
oe_runmake
}
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.6.1.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.6.1.bb
deleted file mode 100644
index 0adf8adbb..000000000
--- a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.6.1.bb
+++ /dev/null
@@ -1,10 +0,0 @@
-require xen.inc
-
-SRC_URI = " \
- http://bits.xensource.com/oss-xen/release/${PV}/xen-${PV}.tar.gz \
- "
-
-SRC_URI[md5sum] = "df2d854c3c90ffeefaf71e7f868fb326"
-SRC_URI[sha256sum] = "44cc2fccba1e147ef4c8da0584ce0f24189c8743de0e3e9a9226da88ddb5f589"
-
-S = "${WORKDIR}/xen-${PV}"
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb
new file mode 100644
index 000000000..35c91373f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.8.0.bb
@@ -0,0 +1,10 @@
+require xen.inc
+
+SRC_URI = " \
+ https://downloads.xenproject.org/release/xen/${PV}/xen-${PV}.tar.gz \
+ "
+
+SRC_URI[md5sum] = "d738f7c741110342621cb8a4d10b0191"
+SRC_URI[sha256sum] = "1e15c713ab7ba3bfda8b4a285ed973529364fd1100e6dd5a61f29583dc667b04"
+
+S = "${WORKDIR}/xen-${PV}"
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb
index a86a50176..e10d669fa 100644
--- a/import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb
@@ -1,15 +1,16 @@
require xen.inc
-SRCREV = "1fd615aa0108490ffc558d27627f509183cbfdaf"
+SRCREV ?= "9a6cc4f5c14b3d7542b7523f88a1b65464733d3a"
-XEN_REL="4.6"
+XEN_REL ?= "4.7"
+XEN_BRANCH ?= "staging-${XEN_REL}"
-PV = "${XEN_REL}.0+git${SRCPV}"
+PV = "${XEN_REL}+git${SRCPV}"
S = "${WORKDIR}/git"
SRC_URI = " \
- git://xenbits.xen.org/xen.git;branch=staging-${XEN_REL} \
+ git://xenbits.xen.org/xen.git;branch=${XEN_BRANCH} \
"
DEFAULT_PREFERENCE = "-1"
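Converting SRCREV, XEN_REL, and XEN_BRANCH to weak (?=) assignments lets a distro retarget the git recipe without patching it. A hedged local.conf sketch (the revision is a placeholder, not a real commit):

    XEN_REL = "4.8"
    XEN_BRANCH = "staging"
    SRCREV_pn-xen = "<commit-on-that-branch>"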
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
index 035b31429..fa2344a76 100644
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
@@ -19,3 +19,28 @@ CONFIG_BLK_CGROUP=m
CONFIG_NETPRIO_CGROUP=m
CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+
+# Virtual drivers
+CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_VIRTIO_NET=m
+CONFIG_HVC_DRIVER=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_RING=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+
+
+# Base support for live boot
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_VFAT_FS=y
+CONFIG_RD_GZIP=y
+
+# Support for virtual ethernet and LXC
+CONFIG_VETH=y
+CONFIG_MACVLAN=y
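Once a kernel built with this fragment is running, the new options can be spot-checked from the target, assuming the kernel also exposes its configuration via CONFIG_IKCONFIG_PROC:

    zcat /proc/config.gz | grep -E 'CONFIG_VIRTIO|CONFIG_VETH|CONFIG_MACVLAN'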
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend
index 85e98cc69..f3be89ed0 100644
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -13,7 +13,7 @@ KERNEL_MODULE_AUTOLOAD += "kvm-amd"
KERNEL_MODULE_AUTOLOAD += "kvm-intel"
# aufs kernel support required for xen-image-minimal
-KERNEL_FEATURES_append += "${@base_contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
+KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
# xen kernel support
-SRC_URI += "${@base_contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
+SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
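bb.utils.contains() is a drop-in replacement for the deprecated base_contains(): bb.utils.contains(var, item, truevalue, falsevalue, d) expands to truevalue when item appears in the space-separated value of var, and to falsevalue otherwise. With xen in DISTRO_FEATURES the SRC_URI line above therefore reduces to:

    SRC_URI += " file://xen.scc"

and to an empty addition otherwise. The identical substitution is applied to the 4.4 and 4.8 bbappends below.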
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 85e98cc69..f3be89ed0 100644
--- a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -13,7 +13,7 @@ KERNEL_MODULE_AUTOLOAD += "kvm-amd"
KERNEL_MODULE_AUTOLOAD += "kvm-intel"
# aufs kernel support required for xen-image-minimal
-KERNEL_FEATURES_append += "${@base_contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
+KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
# xen kernel support
-SRC_URI += "${@base_contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
+SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.8.bbappend
new file mode 100644
index 000000000..f3be89ed0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -0,0 +1,19 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI += "file://xt-checksum.scc \
+ file://ebtables.scc \
+ file://vswitch.scc \
+ file://lxc.scc \
+ "
+KERNEL_FEATURES_append = " features/kvm/qemu-kvm-enable.scc"
+
+KERNEL_MODULE_AUTOLOAD += "openvswitch"
+KERNEL_MODULE_AUTOLOAD += "kvm"
+KERNEL_MODULE_AUTOLOAD += "kvm-amd"
+KERNEL_MODULE_AUTOLOAD += "kvm-intel"
+
+# aufs kernel support required for xen-image-minimal
+KERNEL_FEATURES_append += "${@bb.utils.contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
+
+# xen kernel support
+SRC_URI += "${@bb.utils.contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
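The KERNEL_MODULE_AUTOLOAD entries above make the kernel packaging emit modules-load.d fragments so each module is loaded at boot. For the kvm entry, the generated file on the target should look roughly like this (path per the systemd convention):

    # /etc/modules-load.d/kvm.conf (generated by the kernel packaging)
    kvm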
diff --git a/import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb b/import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb
new file mode 100644
index 000000000..073022e5c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/netns/netns_git.bb
@@ -0,0 +1,44 @@
+HOMEPAGE = "https://github.com/jfrazelle/netns"
+SUMMARY = "Runc hook for setting up default bridge networking."
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=20ce4c6a4f32d6ee4a68e3a7506db3f1"
+DEPENDS = "go-cross"
+
+SRC_URI = "git://github.com/jfrazelle/netns;branch=master"
+SRCREV = "2804050eeab661bfa75c3aa06bdcf60273b02ca7"
+PV = "0.1.0+git${SRCPV}"
+
+S = "${WORKDIR}/git"
+
+inherit go-osarchmap
+
+do_compile() {
+ export GOARCH="${TARGET_GOARCH}"
+
+ # Set up the vendor directory so that it can be used in GOPATH.
+ #
+ # Go looks in a src directory under any directory in GOPATH but netns
+ # uses 'vendor' instead of 'vendor/src'. We can fix this with a symlink.
+ #
+ # We also need to link in the ipallocator directory as that is not under
+ # a src directory.
+ ln -sfn . "${S}/vendor/src"
+ mkdir -p "${S}/vendor/src/github.com/jfrazelle/netns"
+ ln -sfn "${S}/ipallocator" "${S}/vendor/src/github.com/jfrazelle/netns/ipallocator"
+ export GOPATH="${S}/vendor"
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed header files and libraries
+ export CGO_ENABLED="1"
+ export CFLAGS=""
+ export LDFLAGS=""
+ export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+
+ oe_runmake static
+}
+
+do_install() {
+ install -d ${D}/${sbindir}
+ install ${S}/netns ${D}/${sbindir}/netns
+}
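The symlink dance in do_compile() deserves unpacking: Go of this vintage looks for packages under $GOPATH/*/src, while the netns tree vendors them directly under vendor/. Pointing vendor/src back at vendor itself lets one directory satisfy both layouts. A sketch of the effective result of the three setup lines:

    # ${S}/vendor/src -> .        (vendor/ doubles as vendor/src/)
    # ${S}/vendor/src/github.com/jfrazelle/netns/ipallocator -> ${S}/ipallocator
    # GOPATH=${S}/vendor, so 'import "github.com/jfrazelle/netns/ipallocator"'
    # resolves inside the vendored tree.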
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
index fc515e908..3c70703a7 100644
--- a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
@@ -15,7 +15,7 @@ DEPENDS += "bridge-utils openssl python perl"
RDEPENDS_${PN} += "util-linux-uuidgen util-linux-libuuid coreutils \
python perl perl-module-strict ${PN}-switch \
- bash"
+ bash python-twisted"
RDEPENDS_${PN}-testcontroller = "${PN} lsb ${PN}-pki"
RDEPENDS_${PN}-switch = "${PN} openssl procps util-linux-uuidgen"
RDEPENDS_${PN}-pki = "${PN}"
@@ -69,7 +69,8 @@ FILES_${PN}-switch = "\
FILES_${PN} += "${datadir}/ovsdbmonitor"
FILES_${PN} += "/run"
-inherit autotools update-rc.d systemd
+FILES_${PN} += "${libdir}/python${PYTHON_BASEVERSION}/"
+inherit autotools update-rc.d systemd python-dir
SYSTEMD_PACKAGES = "${PN}-switch"
SYSTEMD_SERVICE_${PN}-switch = " \
@@ -102,6 +103,8 @@ do_install_append() {
${D}/${systemd_unitdir}/system/openvswitch-nonetwork.service
oe_runmake modules_install INSTALL_MOD_PATH=${D}
+ install -d ${D}${libdir}/python${PYTHON_BASEVERSION}/site-packages
+ cp -r ${S}/python/ovstest/ ${D}${libdir}/python${PYTHON_BASEVERSION}/site-packages/
}
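The new install lines ship the ovstest Python package so ovs-test can import it at runtime: python-dir (added to the inherit line) provides PYTHON_BASEVERSION for the site-packages path, and the FILES addition sweeps the directory into the main package. A quick on-target smoke test, assuming the module layout from the source tree:

    python -c "import ovstest"    # needs ${PN} plus the new python-twisted RDEPENDS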
pkg_postinst_${PN}-pki () {
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
index b46d5af4e..1c6252a96 100644
--- a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
@@ -50,4 +50,5 @@ do_install_ptest() {
do_install_append() {
oe_runmake modules_install INSTALL_MOD_PATH=${D}
+ rm -r ${D}/${localstatedir}/run
}
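The added rm keeps ${localstatedir}/run (/var/run) out of the packaged files; on a generated image that path is volatile, and shipping a real directory there can break rootfs assembly. A hedged check from the build tree (exact oe-pkgdata-util invocation may vary by release):

    oe-pkgdata-util list-pkg-files openvswitch | grep /var/run   # should print nothing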