author     Brad Bishop <bradleyb@fuzziesquirrel.com>  2019-03-25 13:13:56 -0400
committer  Brad Bishop <bradleyb@fuzziesquirrel.com>  2019-03-25 13:14:34 -0400
commit     f8caae304a2fa94cf2770b72a313ee843b2f177b (patch)
tree       019dd9bf04c554e796e3ec9fc04c311adcf3c93a /poky
parent     2ef13c18a8b076d2a34e9e40d765f687b72ef195 (diff)
download   talos-openbmc-f8caae304a2fa94cf2770b72a313ee843b2f177b.tar.gz
           talos-openbmc-f8caae304a2fa94cf2770b72a313ee843b2f177b.zip
poky: refresh thud: 506ec088e5..e4c0a8a7cb
Update poky to thud HEAD.

Alexander Kanavin (1):
      ca-certificates: upgrade 20180409 -> 20190110

André Draszik (1):
      systemd: RDEPENDS on util-linux-umount

Changqing Li (1):
      libsndfile1: Security fix CVE-2018-19432

Chen Qi (1):
      target-sdk-provides-dummy: add more perl modules to avoid populate_sdk failure

Douglas Royds (1):
      libpam: libpamc is licensed under its own BSD-style licence

George McCollister (1):
      systemd: fix CVE-2019-6454

Jonathan Rajotte-Julien (3):
      lttng-ust: update to 2.10.3
      lttng-modules: update to 2.10.9
      lttng-tools: update to 2.9.11

Mark Hatle (10):
      bitbake: gitsm.py: Fix when a submodule is defined, but not initialized
      bitbake: gitsm.py: Add support for alternative URL formats from submodule files
      bitbake: tests/fetch.py: Add alternative gitsm test case
      bitbake: gitsm.py: Optimize code and attempt to resolve locking issue
      bitbake: gitsm.py: revise unpack
      bitbake: gitsm.py: Rework the shallow fetcher and test case
      bitbake: gitsm.py: Refactor the functions and simplify the class
      bitbake: gitsm.py: Fix relative URLs
      bitbake: gitsmy.py: Fix unpack of submodules of submodules
      bitbake: gitsm: The fetcher did not process some recursive submodules properly.

Ming Liu (1):
      rm_work: sort the value of do_build dependencies

Oleksandr Kravchuk (1):
      target-sdk-provides-dummy: add perl-module-overload

Richard Purdie (3):
      target-sdk-provides-dummy: Extend to -dev and -src packages
      systemd: Update recent CVE patches
      kernel: Ensure an initramfs is added if configured

Robert Yang (1):
      send-error-report: Add --no-ssl to use http protocol

Ross Burton (1):
      libpng: fix CVE-2019-7317

Change-Id: I3e03c837688d49703b4989a561f3728d616abbec
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Diffstat (limited to 'poky')
-rw-r--r--  poky/bitbake/lib/bb/fetch2/gitsm.py | 253
-rw-r--r--  poky/bitbake/lib/bb/tests/fetch.py | 70
-rw-r--r--  poky/meta/classes/kernel.bbclass | 4
-rw-r--r--  poky/meta/classes/rm_work.bbclass | 3
-rw-r--r--  poky/meta/recipes-core/meta/target-sdk-provides-dummy.bb | 14
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/0024-journald-do-not-store-the-iovec-entry-for-process-co.patch | 6
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields-1k.patch | 56
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields.patch | 139
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/0026-journal-fix-out-of-bounds-read-CVE-2018-16866.patch | 49
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/0026-journal-remote-set-a-limit-on-the-number-of-fields-i.patch | 84
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/0027-journal-fix-syslog_parse_identifier.patch | 77
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/0028-journal-do-not-remove-multiple-spaces-after-identifi.patch | 84
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/CVE-2019-6454.patch | 210
-rw-r--r--  poky/meta/recipes-core/systemd/systemd/sd-bus-if-we-receive-an-invalid-dbus-message-ignore-.patch | 61
-rw-r--r--  poky/meta/recipes-core/systemd/systemd_239.bb | 10
-rw-r--r--  poky/meta/recipes-extended/pam/libpam_1.3.0.bb | 4
-rw-r--r--  poky/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-net-expose-sk-wmem-in-sock_exceed_buf_limit-trac.patch | 67
-rw-r--r--  poky/meta/recipes-kernel/lttng/lttng-modules_2.10.9.bb (renamed from poky/meta/recipes-kernel/lttng/lttng-modules_2.10.7.bb) | 5
-rw-r--r--  poky/meta/recipes-kernel/lttng/lttng-tools/0001-Allow-multiple-attempts-to-connect-to-relayd.patch | 17
-rw-r--r--  poky/meta/recipes-kernel/lttng/lttng-tools_2.9.11.bb (renamed from poky/meta/recipes-kernel/lttng/lttng-tools_2.9.5.bb) | 4
-rw-r--r--  poky/meta/recipes-kernel/lttng/lttng-ust_2.10.3.bb (renamed from poky/meta/recipes-kernel/lttng/lttng-ust_2.10.1.bb) | 4
-rw-r--r--  poky/meta/recipes-multimedia/libpng/libpng/CVE-2019-7317.patch | 20
-rw-r--r--  poky/meta/recipes-multimedia/libpng/libpng_1.6.36.bb | 3
-rw-r--r--  poky/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2018-19432.patch | 115
-rw-r--r--  poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb | 1
-rw-r--r--  poky/meta/recipes-support/ca-certificates/ca-certificates_20190110.bb (renamed from poky/meta/recipes-support/ca-certificates/ca-certificates_20180409.bb) | 2
-rwxr-xr-x  poky/scripts/send-error-report | 11
27 files changed, 823 insertions, 550 deletions
diff --git a/poky/bitbake/lib/bb/fetch2/gitsm.py b/poky/bitbake/lib/bb/fetch2/gitsm.py
index 35729dbc0..b21fed266 100644
--- a/poky/bitbake/lib/bb/fetch2/gitsm.py
+++ b/poky/bitbake/lib/bb/fetch2/gitsm.py
@@ -45,60 +45,97 @@ class GitSM(Git):
"""
return ud.type in ['gitsm']
- @staticmethod
- def parse_gitmodules(gitmodules):
- modules = {}
- module = ""
- for line in gitmodules.splitlines():
- if line.startswith('[submodule'):
- module = line.split('"')[1]
- modules[module] = {}
- elif module and line.strip().startswith('path'):
- path = line.split('=')[1].strip()
- modules[module]['path'] = path
- elif module and line.strip().startswith('url'):
- url = line.split('=')[1].strip()
- modules[module]['url'] = url
- return modules
-
- def update_submodules(self, ud, d):
+ def process_submodules(self, ud, workdir, function, d):
+ """
+ Iterate over all of the submodules in this repository and execute
+ the 'function' for each of them.
+ """
+
submodules = []
paths = {}
+ revision = {}
uris = {}
- local_paths = {}
-
+ subrevision = {}
+
+ def parse_gitmodules(gitmodules):
+ modules = {}
+ module = ""
+ for line in gitmodules.splitlines():
+ if line.startswith('[submodule'):
+ module = line.split('"')[1]
+ modules[module] = {}
+ elif module and line.strip().startswith('path'):
+ path = line.split('=')[1].strip()
+ modules[module]['path'] = path
+ elif module and line.strip().startswith('url'):
+ url = line.split('=')[1].strip()
+ modules[module]['url'] = url
+ return modules
+
+ # Collect the defined submodules, and their attributes
for name in ud.names:
try:
- gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=ud.clonedir)
+ gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=workdir)
except:
# No submodules to update
continue
- for m, md in self.parse_gitmodules(gitmodules).items():
+ for m, md in parse_gitmodules(gitmodules).items():
+ try:
+ module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revisions[name], md['path']), d, quiet=True, workdir=workdir)
+ except:
+ # If the command fails, we don't have a valid file to check. If it doesn't
+ # fail -- it still might be a failure, see next check...
+ module_hash = ""
+
+ if not module_hash:
+ logger.debug(1, "submodule %s is defined, but is not initialized in the repository. Skipping", m)
+ continue
+
submodules.append(m)
paths[m] = md['path']
+ revision[m] = ud.revisions[name]
uris[m] = md['url']
+ subrevision[m] = module_hash.split()[2]
+
+ # Convert relative to absolute uri based on parent uri
if uris[m].startswith('..'):
newud = copy.copy(ud)
- newud.path = os.path.realpath(os.path.join(newud.path, md['url']))
+ newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
uris[m] = Git._get_repo_url(self, newud)
for module in submodules:
- module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revisions[name], paths[module]), d, quiet=True, workdir=ud.clonedir)
- module_hash = module_hash.split()[2]
+ # Translate the module url into a SRC_URI
+
+ if "://" in uris[module]:
+ # Properly formated URL already
+ proto = uris[module].split(':', 1)[0]
+ url = uris[module].replace('%s:' % proto, 'gitsm:', 1)
+ else:
+ if ":" in uris[module]:
+ # Most likely an SSH style reference
+ proto = "ssh"
+ if ":/" in uris[module]:
+ # Absolute reference, easy to convert..
+ url = "gitsm://" + uris[module].replace(':/', '/', 1)
+ else:
+ # Relative reference, no way to know if this is right!
+ logger.warning("Submodule included by %s refers to relative ssh reference %s. References may fail if not absolute." % (ud.url, uris[module]))
+ url = "gitsm://" + uris[module].replace(':', '/', 1)
+ else:
+ # This has to be a file reference
+ proto = "file"
+ url = "gitsm://" + uris[module]
- # Build new SRC_URI
- proto = uris[module].split(':', 1)[0]
- url = uris[module].replace('%s:' % proto, 'gitsm:', 1)
url += ';protocol=%s' % proto
url += ";name=%s" % module
- url += ";bareclone=1;nocheckout=1;nobranch=1"
+ url += ";subpath=%s" % paths[module]
ld = d.createCopy()
# Not necessary to set SRC_URI, since we're passing the URI to
# Fetch.
#ld.setVar('SRC_URI', url)
- ld.setVar('SRCREV_%s' % module, module_hash)
+ ld.setVar('SRCREV_%s' % module, subrevision[module])
# Workaround for issues with SRCPV/SRCREV_FORMAT errors
# error refer to 'multiple' repositories. Only the repository
@@ -106,145 +143,63 @@ class GitSM(Git):
ld.setVar('SRCPV', d.getVar('SRCPV'))
ld.setVar('SRCREV_FORMAT', module)
- newfetch = Fetch([url], ld, cache=False)
- newfetch.download()
- local_paths[module] = newfetch.localpath(url)
-
- # Correct the submodule references to the local download version...
- runfetchcmd("%(basecmd)s config submodule.%(module)s.url %(url)s" % {'basecmd': ud.basecmd, 'module': module, 'url' : local_paths[module]}, d, workdir=ud.clonedir)
-
- symlink_path = os.path.join(ud.clonedir, 'modules', paths[module])
- if not os.path.exists(symlink_path):
- try:
- os.makedirs(os.path.dirname(symlink_path), exist_ok=True)
- except OSError:
- pass
- os.symlink(local_paths[module], symlink_path)
-
- return True
+ function(ud, url, module, paths[module], ld)
- def need_update(self, ud, d):
- main_repo_needs_update = Git.need_update(self, ud, d)
-
- # First check that the main repository has enough history fetched. If it doesn't, then we don't
- # even have the .gitmodules and gitlinks for the submodules to attempt asking whether the
- # submodules' histories are recent enough.
- if main_repo_needs_update:
- return True
-
- # Now check that the submodule histories are new enough. The git-submodule command doesn't have
- # any clean interface for doing this aside from just attempting the checkout (with network
- # fetched disabled).
- return not self.update_submodules(ud, d)
+ return submodules != []
def download(self, ud, d):
- Git.download(self, ud, d)
+ def download_submodule(ud, url, module, modpath, d):
+ url += ";bareclone=1;nobranch=1"
- if not ud.shallow or ud.localpath != ud.fullshallow:
- self.update_submodules(ud, d)
+ # Is the following still needed?
+ #url += ";nocheckout=1"
- def copy_submodules(self, submodules, ud, destdir, d):
- if ud.bareclone:
- repo_conf = destdir
- else:
- repo_conf = os.path.join(destdir, '.git')
-
- if submodules and not os.path.exists(os.path.join(repo_conf, 'modules')):
- os.mkdir(os.path.join(repo_conf, 'modules'))
-
- for module, md in submodules.items():
- srcpath = os.path.join(ud.clonedir, 'modules', md['path'])
- modpath = os.path.join(repo_conf, 'modules', md['path'])
+ try:
+ newfetch = Fetch([url], d, cache=False)
+ newfetch.download()
+ except Exception as e:
+ logger.error('gitsm: submodule download failed: %s %s' % (type(e).__name__, str(e)))
+ raise
- if os.path.exists(srcpath):
- if os.path.exists(os.path.join(srcpath, '.git')):
- srcpath = os.path.join(srcpath, '.git')
+ Git.download(self, ud, d)
+ self.process_submodules(ud, ud.clonedir, download_submodule, d)
- target = modpath
- if os.path.exists(modpath):
- target = os.path.dirname(modpath)
+ def unpack(self, ud, destdir, d):
+ def unpack_submodules(ud, url, module, modpath, d):
+ url += ";bareclone=1;nobranch=1"
- os.makedirs(os.path.dirname(target), exist_ok=True)
- runfetchcmd("cp -fpLR %s %s" % (srcpath, target), d)
- elif os.path.exists(modpath):
- # Module already exists, likely unpacked from a shallow mirror clone
- pass
+ # Figure out where we clone over the bare submodules...
+ if ud.bareclone:
+ repo_conf = ud.destdir
else:
- # This is fatal, as we do NOT want git-submodule to hit the network
- raise bb.fetch2.FetchError('Submodule %s does not exist in %s or %s.' % (module, srcpath, modpath))
-
- def clone_shallow_local(self, ud, dest, d):
- super(GitSM, self).clone_shallow_local(ud, dest, d)
+ repo_conf = os.path.join(ud.destdir, '.git')
- # Copy over the submodules' fetched histories too.
- repo_conf = os.path.join(dest, '.git')
-
- submodules = []
- for name in ud.names:
try:
- gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revision), d, quiet=True, workdir=dest)
- except:
- # No submodules to update
- continue
+ newfetch = Fetch([url], d, cache=False)
+ newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', modpath)))
+ except Exception as e:
+ logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
+ raise
- submodules = self.parse_gitmodules(gitmodules)
- self.copy_submodules(submodules, ud, dest, d)
+ local_path = newfetch.localpath(url)
- def unpack(self, ud, destdir, d):
- Git.unpack(self, ud, destdir, d)
+ # Correct the submodule references to the local download version...
+ runfetchcmd("%(basecmd)s config submodule.%(module)s.url %(url)s" % {'basecmd': ud.basecmd, 'module': module, 'url' : local_path}, d, workdir=ud.destdir)
- # Copy over the submodules' fetched histories too.
- if ud.bareclone:
- repo_conf = ud.destdir
- else:
- repo_conf = os.path.join(ud.destdir, '.git')
+ if ud.shallow:
+ runfetchcmd("%(basecmd)s config submodule.%(module)s.shallow true" % {'basecmd': ud.basecmd, 'module': module}, d, workdir=ud.destdir)
- update_submodules = False
- paths = {}
- uris = {}
- local_paths = {}
- for name in ud.names:
+ # Ensure the submodule repository is NOT set to bare, since we're checking it out...
try:
- gitmodules = runfetchcmd("%s show HEAD:.gitmodules" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
+ runfetchcmd("%s config core.bare false" % (ud.basecmd), d, quiet=True, workdir=os.path.join(repo_conf, 'modules', modpath))
except:
- # No submodules to update
- continue
-
- submodules = self.parse_gitmodules(gitmodules)
- self.copy_submodules(submodules, ud, ud.destdir, d)
-
- submodules_queue = [(module, os.path.join(repo_conf, 'modules', md['path'])) for module, md in submodules.items()]
- while len(submodules_queue) != 0:
- module, modpath = submodules_queue.pop()
-
- # add submodule children recursively
- try:
- gitmodules = runfetchcmd("%s show HEAD:.gitmodules" % (ud.basecmd), d, quiet=True, workdir=modpath)
- for m, md in self.parse_gitmodules(gitmodules).items():
- submodules_queue.append([m, os.path.join(modpath, 'modules', md['path'])])
- except:
- # no children
- pass
-
+ logger.error("Unable to set git config core.bare to false for %s" % os.path.join(repo_conf, 'modules', modpath))
+ raise
- # There are submodules to update
- update_submodules = True
-
- # Determine (from the submodule) the correct url to reference
- try:
- output = runfetchcmd("%(basecmd)s config remote.origin.url" % {'basecmd': ud.basecmd}, d, workdir=modpath)
- except bb.fetch2.FetchError as e:
- # No remote url defined in this submodule
- continue
-
- local_paths[module] = output
-
- # Setup the local URL properly (like git submodule init or sync would do...)
- runfetchcmd("%(basecmd)s config submodule.%(module)s.url %(url)s" % {'basecmd': ud.basecmd, 'module': module, 'url' : local_paths[module]}, d, workdir=ud.destdir)
+ Git.unpack(self, ud, destdir, d)
- # Ensure the submodule repository is NOT set to bare, since we're checking it out...
- runfetchcmd("%s config core.bare false" % (ud.basecmd), d, quiet=True, workdir=modpath)
+ ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)
- if update_submodules:
+ if not ud.bareclone and ret:
# Run submodule update, this sets up the directories -- without touching the config
runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
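
The trickiest part of the new gitsm.py logic above is the block that rewrites each .gitmodules url into a gitsm SRC_URI before handing it to a nested Fetch(). The short standalone Python sketch below (not part of the commit) restates that translation for readability; translate_submodule_uri() and the sample URLs are illustrative only, and the real callbacks additionally append bareclone=1;nobranch=1 and first resolve '..'-relative urls against the parent repository.

    def translate_submodule_uri(uri, name, path):
        # Restates the "Translate the module url into a SRC_URI" block from
        # process_submodules() above.
        if "://" in uri:
            # Properly formed URL already: only the scheme is swapped for gitsm
            proto = uri.split(':', 1)[0]
            url = uri.replace('%s:' % proto, 'gitsm:', 1)
        elif ":" in uri:
            # Most likely an scp-style ssh reference, e.g. user@host:path
            proto = "ssh"
            if ":/" in uri:
                # host:/absolute/path is easy to convert
                url = "gitsm://" + uri.replace(':/', '/', 1)
            else:
                # host:relative/path cannot be reliably converted (the code warns)
                url = "gitsm://" + uri.replace(':', '/', 1)
        else:
            # Anything else has to be a plain file reference
            proto = "file"
            url = "gitsm://" + uri
        return url + ";protocol=%s;name=%s;subpath=%s" % (proto, name, path)

    print(translate_submodule_uri("https://github.com/bus1/c-dvar",
                                  "c-dvar", "subprojects/c-dvar"))
    # gitsm://github.com/bus1/c-dvar;protocol=https;name=c-dvar;subpath=subprojects/c-dvar
    print(translate_submodule_uri("git@example.com:/repos/sub.git", "sub", "ext/sub"))
    # gitsm://git@example.com/repos/sub.git;protocol=ssh;name=sub;subpath=ext/sub
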
diff --git a/poky/bitbake/lib/bb/tests/fetch.py b/poky/bitbake/lib/bb/tests/fetch.py
index 6848095cf..522d2024f 100644
--- a/poky/bitbake/lib/bb/tests/fetch.py
+++ b/poky/bitbake/lib/bb/tests/fetch.py
@@ -893,12 +893,70 @@ class FetcherNetworkTest(FetcherTest):
@skipIfNoNetwork()
def test_git_submodule(self):
- fetcher = bb.fetch.Fetch(["gitsm://git.yoctoproject.org/git-submodule-test;rev=f12e57f2edf0aa534cf1616fa983d165a92b0842"], self.d)
+ # URL with ssh submodules
+ url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=ssh-gitsm-tests;rev=049da4a6cb198d7c0302e9e8b243a1443cb809a7"
+ # Original URL (comment this if you have ssh access to git.yoctoproject.org)
+ url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=master;rev=a2885dd7d25380d23627e7544b7bbb55014b16ee"
+ fetcher = bb.fetch.Fetch([url], self.d)
+ fetcher.download()
+ # Previous cwd has been deleted
+ os.chdir(os.path.dirname(self.unpackdir))
+ fetcher.unpack(self.unpackdir)
+
+ repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
+ self.assertTrue(os.path.exists(repo_path), msg='Unpacked repository missing')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake')), msg='bitbake submodule missing')
+ self.assertFalse(os.path.exists(os.path.join(repo_path, 'na')), msg='uninitialized submodule present')
+
+ # Only when we're running the extended test with a submodule's submodule, can we check this.
+ if os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1')):
+ self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1', 'bitbake')), msg='submodule of submodule missing')
+
+ def test_git_submodule_dbus_broker(self):
+ # The following external repositories have show failures in fetch and unpack operations
+ # We want to avoid regressions!
+ url = "gitsm://github.com/bus1/dbus-broker;protocol=git;rev=fc874afa0992d0c75ec25acb43d344679f0ee7d2"
+ fetcher = bb.fetch.Fetch([url], self.d)
+ fetcher.download()
+ # Previous cwd has been deleted
+ os.chdir(os.path.dirname(self.unpackdir))
+ fetcher.unpack(self.unpackdir)
+
+ repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-dvar/config')), msg='Missing submodule config "subprojects/c-dvar"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-list/config')), msg='Missing submodule config "subprojects/c-list"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-rbtree/config')), msg='Missing submodule config "subprojects/c-rbtree"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-sundry/config')), msg='Missing submodule config "subprojects/c-sundry"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-utf8/config')), msg='Missing submodule config "subprojects/c-utf8"')
+
+ def test_git_submodule_CLI11(self):
+ url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf"
+ fetcher = bb.fetch.Fetch([url], self.d)
fetcher.download()
# Previous cwd has been deleted
os.chdir(os.path.dirname(self.unpackdir))
fetcher.unpack(self.unpackdir)
+ repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/googletest/config')), msg='Missing submodule config "extern/googletest"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/json/config')), msg='Missing submodule config "extern/json"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/sanitizers/config')), msg='Missing submodule config "extern/sanitizers"')
+
+ def test_git_submodule_aktualizr(self):
+ url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=git;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44"
+ fetcher = bb.fetch.Fetch([url], self.d)
+ fetcher.download()
+ # Previous cwd has been deleted
+ os.chdir(os.path.dirname(self.unpackdir))
+ fetcher.unpack(self.unpackdir)
+
+ repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/partial/extern/isotp-c/config')), msg='Missing submodule config "partial/extern/isotp-c/config"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/partial/extern/isotp-c/modules/deps/bitfield-c/config')), msg='Missing submodule config "partial/extern/isotp-c/modules/deps/bitfield-c/config"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, 'partial/extern/isotp-c/deps/bitfield-c/.git')), msg="Submodule of submodule isotp-c did not unpack properly")
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/tests/tuf-test-vectors/config')), msg='Missing submodule config "tests/tuf-test-vectors/config"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/googletest/config')), msg='Missing submodule config "third_party/googletest/config"')
+ self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/HdrHistogram_c/config')), msg='Missing submodule config "third_party/HdrHistogram_c/config"')
class TrustedNetworksTest(FetcherTest):
def test_trusted_network(self):
@@ -1312,6 +1370,7 @@ class GitShallowTest(FetcherTest):
# fetch and unpack, from the shallow tarball
bb.utils.remove(self.gitdir, recurse=True)
bb.utils.remove(ud.clonedir, recurse=True)
+ bb.utils.remove(ud.clonedir.replace('gitsource', 'gitsubmodule'), recurse=True)
# confirm that the unpacked repo is used when no git clone or git
# mirror tarball is available
@@ -1466,6 +1525,7 @@ class GitShallowTest(FetcherTest):
self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir)
self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir)
self.add_empty_file('asub', cwd=smdir)
+ self.add_empty_file('bsub', cwd=smdir)
self.git('submodule init', cwd=self.srcdir)
self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
@@ -1475,10 +1535,16 @@ class GitShallowTest(FetcherTest):
uri = 'gitsm://%s;protocol=file;subdir=${S}' % self.srcdir
fetcher, ud = self.fetch_shallow(uri)
+ # Verify the main repository is shallow
self.assertRevCount(1)
- assert './.git/modules/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0]
+
+ # Verify the gitsubmodule directory is present
assert os.listdir(os.path.join(self.gitdir, 'gitsubmodule'))
+ # Verify the submodule is also shallow
+ self.assertRevCount(1, cwd=os.path.join(self.gitdir, 'gitsubmodule'))
+
+
if any(os.path.exists(os.path.join(p, 'git-annex')) for p in os.environ.get('PATH').split(':')):
def test_shallow_annex(self):
self.add_empty_file('a')
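
All of the new gitsm tests above follow the same pattern: build a BitBake datastore, point DL_DIR at a scratch area, download and unpack a gitsm:// URL, then assert on the resulting .git/modules layout. The following is a minimal, hedged sketch of that pattern outside the unittest harness; it assumes bitbake's lib/ directory is on PYTHONPATH and that network access is available, and the exact datastore variables required (the test fixture also sets PERSISTENT_DIR) may vary with the bitbake version.

    import os
    import tempfile

    import bb.data
    from bb.fetch2 import Fetch

    tmpdir = tempfile.mkdtemp()
    dldir = os.path.join(tmpdir, "download")
    unpackdir = os.path.join(tmpdir, "unpacked")
    os.makedirs(dldir)
    os.makedirs(unpackdir)

    d = bb.data.init()
    d.setVar("DL_DIR", dldir)
    d.setVar("PERSISTENT_DIR", os.path.join(tmpdir, "persistdata"))

    # Same revision the CLI11 test above pins
    url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf"
    fetcher = Fetch([url], d)
    fetcher.download()
    fetcher.unpack(unpackdir)

    # Bare submodule clones end up under .git/modules/, which is what the tests assert on
    print(os.path.exists(os.path.join(unpackdir, "git",
                                      ".git/modules/extern/googletest/config")))
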
diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass
index 45cb4fabc..bd185e258 100644
--- a/poky/meta/classes/kernel.bbclass
+++ b/poky/meta/classes/kernel.bbclass
@@ -224,9 +224,11 @@ copy_initramfs() {
break
;;
esac
+ break
fi
done
- echo "Finished copy of initramfs into ./usr"
+ # Verify that the above loop found a initramfs, fail otherwise
+ [ -f ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio ] && echo "Finished copy of initramfs into ./usr" || die "Could not find any ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.cpio{.gz|.lz4|.lzo|.lzma|.xz) for bundling; INITRAMFS_IMAGE_NAME might be wrong."
}
do_bundle_initramfs () {
diff --git a/poky/meta/classes/rm_work.bbclass b/poky/meta/classes/rm_work.bbclass
index 10e134b95..c478f4a18 100644
--- a/poky/meta/classes/rm_work.bbclass
+++ b/poky/meta/classes/rm_work.bbclass
@@ -164,8 +164,7 @@ python inject_rm_work() {
# Determine what do_build depends upon, without including do_build
# itself or our own special do_rm_work_all.
- deps = set(bb.build.preceedtask('do_build', True, d))
- deps.difference_update(('do_build', 'do_rm_work_all'))
+ deps = sorted((set(bb.build.preceedtask('do_build', True, d))).difference(('do_build', 'do_rm_work_all')) or "")
# deps can be empty if do_build doesn't exist, e.g. *-inital recipes
if not deps:
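
The rm_work change above swaps an unordered set of do_build dependencies for a sorted list, so the value injected into the task graph is stable across builds; the trailing 'or ""' turns an empty difference into sorted("") == []. A tiny hedged Python sketch of the before/after behaviour, using made-up task names:

    EXCLUDED = ('do_build', 'do_rm_work_all')

    def old_style(preceed):
        # Original code: in-place set difference; iteration order is not guaranteed
        deps = set(preceed)
        deps.difference_update(EXCLUDED)
        return deps

    def new_style(preceed):
        # New code: deterministic ordering; 'or ""' handles the empty-set case
        return sorted(set(preceed).difference(EXCLUDED) or "")

    tasks = {'do_package_qa', 'do_populate_sysroot', 'do_rm_work_all', 'do_build'}
    print(new_style(tasks))                              # ['do_package_qa', 'do_populate_sysroot'] every run
    print(sorted(old_style(tasks)) == new_style(tasks))  # True: same members, only the ordering changed
    print(new_style(EXCLUDED))                           # [] when only the excluded tasks remain
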
diff --git a/poky/meta/recipes-core/meta/target-sdk-provides-dummy.bb b/poky/meta/recipes-core/meta/target-sdk-provides-dummy.bb
index edf07c4a2..0160cb8ee 100644
--- a/poky/meta/recipes-core/meta/target-sdk-provides-dummy.bb
+++ b/poky/meta/recipes-core/meta/target-sdk-provides-dummy.bb
@@ -2,9 +2,17 @@ DUMMYARCH = "sdk-provides-dummy-target"
DUMMYPROVIDES = "\
busybox \
+ busybox-dev \
+ busybox-src \
coreutils \
+ coreutils-dev \
+ coreutils-src \
bash \
+ bash-dev \
+ bash-src \
perl \
+ perl-dev \
+ perl-src \
perl-module-re \
perl-module-strict \
perl-module-vars \
@@ -23,16 +31,22 @@ DUMMYPROVIDES = "\
perl-module-file-glob \
perl-module-file-path \
perl-module-file-stat \
+ perl-module-file-temp \
perl-module-getopt-long \
perl-module-io-file \
+ perl-module-overload \
perl-module-posix \
+ perl-module-overload \
perl-module-thread-queue \
perl-module-threads \
+ perl-module-warnings \
/bin/sh \
/bin/bash \
/usr/bin/env \
/usr/bin/perl \
pkgconfig \
+ pkgconfig-dev \
+ pkgconfig-src \
"
require dummy-sdk-package.inc
diff --git a/poky/meta/recipes-core/systemd/systemd/0024-journald-do-not-store-the-iovec-entry-for-process-co.patch b/poky/meta/recipes-core/systemd/systemd/0024-journald-do-not-store-the-iovec-entry-for-process-co.patch
index c3009545b..c2f78be39 100644
--- a/poky/meta/recipes-core/systemd/systemd/0024-journald-do-not-store-the-iovec-entry-for-process-co.patch
+++ b/poky/meta/recipes-core/systemd/systemd/0024-journald-do-not-store-the-iovec-entry-for-process-co.patch
@@ -1,4 +1,4 @@
-From fe19f5a9d0d8b9977e9507a9b66c3cc66744cd38 Mon Sep 17 00:00:00 2001
+From 9cb07e7d82c7c4f28bbaa1478e1387e8ea3d03dd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
Date: Wed, 5 Dec 2018 18:38:39 +0100
Subject: [PATCH] journald: do not store the iovec entry for process
@@ -16,6 +16,10 @@ journal_file_append_entry() returns -E2BIG.
Patch backported from systemd master at
084eeb865ca63887098e0945fb4e93c852b91b0f.
+
+CVE: CVE-2018-16864
+Upstream-Status: Backport
+Signed-off-by: Marcus Cooper <marcusc@axis.com>
---
src/basic/io-util.c | 10 ++++++++++
src/basic/io-util.h | 2 ++
diff --git a/poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields-1k.patch b/poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields-1k.patch
deleted file mode 100644
index 50a01efe8..000000000
--- a/poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields-1k.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From 4566aaf97f5b4143b930d75628f3abc905249dcd Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
-Date: Wed, 5 Dec 2018 22:45:02 +0100
-Subject: [PATCH] journald: set a limit on the number of fields (1k)
-
-We allocate a iovec entry for each field, so with many short entries,
-our memory usage and processing time can be large, even with a relatively
-small message size. Let's refuse overly long entries.
-
-CVE-2018-16865
-https://bugzilla.redhat.com/show_bug.cgi?id=1653861
-
-What from I can see, the problem is not from an alloca, despite what the CVE
-description says, but from the attack multiplication that comes from creating
-many very small iovecs: (void* + size_t) for each three bytes of input message.
-
-Patch backported from systemd master at
-052c57f132f04a3cf4148f87561618da1a6908b4.
----
- src/basic/journal-importer.h | 3 +++
- src/journal/journald-native.c | 5 +++++
- 2 files changed, 8 insertions(+)
-
-diff --git a/src/basic/journal-importer.h b/src/basic/journal-importer.h
-index f49ce734a1..c4ae45d32d 100644
---- a/src/basic/journal-importer.h
-+++ b/src/basic/journal-importer.h
-@@ -16,6 +16,9 @@
- #define DATA_SIZE_MAX (1024*1024*768u)
- #define LINE_CHUNK 8*1024u
-
-+/* The maximum number of fields in an entry */
-+#define ENTRY_FIELD_COUNT_MAX 1024
-+
- struct iovec_wrapper {
- struct iovec *iovec;
- size_t size_bytes;
-diff --git a/src/journal/journald-native.c b/src/journal/journald-native.c
-index 5ff22a10af..951d092053 100644
---- a/src/journal/journald-native.c
-+++ b/src/journal/journald-native.c
-@@ -140,6 +140,11 @@ static int server_process_entry(
- }
-
- /* A property follows */
-+ if (n > ENTRY_FIELD_COUNT_MAX) {
-+ log_debug("Received an entry that has more than " STRINGIFY(ENTRY_FIELD_COUNT_MAX) " fields, ignoring entry.");
-+ r = 1;
-+ goto finish;
-+ }
-
- /* n existing properties, 1 new, +1 for _TRANSPORT */
- if (!GREEDY_REALLOC(iovec, m,
---
-2.11.0
-
diff --git a/poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields.patch b/poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields.patch
new file mode 100644
index 000000000..ae9ef5de5
--- /dev/null
+++ b/poky/meta/recipes-core/systemd/systemd/0025-journald-set-a-limit-on-the-number-of-fields.patch
@@ -0,0 +1,139 @@
+From 7cad044b72406cbadf048da432c29afea74c3c10 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
+Date: Wed, 5 Dec 2018 22:45:02 +0100
+Subject: [PATCH] journald: set a limit on the number of fields
+
+The fix for CVE-2018-16865 is plucked from two commits that have
+been pushed to systemd master.
+
+journald: set a limit on the number of fields (1k)
+
+We allocate a iovec entry for each field, so with many short entries,
+our memory usage and processing time can be large, even with a relatively
+small message size. Let's refuse overly long entries.
+
+CVE-2018-16865
+https://bugzilla.redhat.com/show_bug.cgi?id=1653861
+
+What from I can see, the problem is not from an alloca, despite what the CVE
+description says, but from the attack multiplication that comes from creating
+many very small iovecs: (void* + size_t) for each three bytes of input message.
+
+Patch backported from systemd master at
+052c57f132f04a3cf4148f87561618da1a6908b4.
+
+journal-remote: set a limit on the number of fields in a message
+
+Existing use of E2BIG is replaced with ENOBUFS (entry too long), and E2BIG is
+reused for the new error condition (too many fields).
+
+This matches the change done for systemd-journald, hence forming the second
+part of the fix for CVE-2018-16865
+(https://bugzilla.redhat.com/show_bug.cgi?id=1653861).
+
+Patch backported from systemd master at
+ef4d6abe7c7fab6cbff975b32e76b09feee56074.
+with the changes applied by 7fdb237f5473cb8fc2129e57e8a0039526dcb4fd
+removed.
+
+CVE: CVE-2018-16865
+Upstream-Status: Backport
+Signed-off-by: Marcus Cooper <marcusc@axis.com>
+---
+ src/basic/journal-importer.c | 5 ++++-
+ src/basic/journal-importer.h | 3 +++
+ src/journal-remote/journal-remote-main.c | 7 ++++++-
+ src/journal-remote/journal-remote.c | 5 ++++-
+ src/journal/journald-native.c | 5 +++++
+ 5 files changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/src/basic/journal-importer.c b/src/basic/journal-importer.c
+index ca203bbbfc..3ac55a66d9 100644
+--- a/src/basic/journal-importer.c
++++ b/src/basic/journal-importer.c
+@@ -23,6 +23,9 @@ enum {
+ };
+
+ static int iovw_put(struct iovec_wrapper *iovw, void* data, size_t len) {
++ if (iovw->count >= ENTRY_FIELD_COUNT_MAX)
++ return -E2BIG;
++
+ if (!GREEDY_REALLOC(iovw->iovec, iovw->size_bytes, iovw->count + 1))
+ return log_oom();
+
+@@ -98,7 +101,7 @@ static int get_line(JournalImporter *imp, char **line, size_t *size) {
+ imp->scanned = imp->filled;
+ if (imp->scanned >= DATA_SIZE_MAX) {
+ log_error("Entry is bigger than %u bytes.", DATA_SIZE_MAX);
+- return -E2BIG;
++ return -ENOBUFS;
+ }
+
+ if (imp->passive_fd)
+diff --git a/src/basic/journal-importer.h b/src/basic/journal-importer.h
+index f49ce734a1..c4ae45d32d 100644
+--- a/src/basic/journal-importer.h
++++ b/src/basic/journal-importer.h
+@@ -16,6 +16,9 @@
+ #define DATA_SIZE_MAX (1024*1024*768u)
+ #define LINE_CHUNK 8*1024u
+
++/* The maximum number of fields in an entry */
++#define ENTRY_FIELD_COUNT_MAX 1024
++
+ struct iovec_wrapper {
+ struct iovec *iovec;
+ size_t size_bytes;
+diff --git a/src/journal-remote/journal-remote-main.c b/src/journal-remote/journal-remote-main.c
+index 8fda9d1499..3a01fef646 100644
+--- a/src/journal-remote/journal-remote-main.c
++++ b/src/journal-remote/journal-remote-main.c
+@@ -212,7 +212,12 @@ static int process_http_upload(
+ break;
+ else if (r < 0) {
+ log_warning("Failed to process data for connection %p", connection);
+- if (r == -E2BIG)
++ if (r == -ENOBUFS)
++ return mhd_respondf(connection,
++ r, MHD_HTTP_PAYLOAD_TOO_LARGE,
++ "Entry is above the maximum of %u, aborting connection %p.",
++ DATA_SIZE_MAX, connection);
++ else if (r == -E2BIG)
+ return mhd_respondf(connection,
+ r, MHD_HTTP_PAYLOAD_TOO_LARGE,
+ "Entry is too large, maximum is " STRINGIFY(DATA_SIZE_MAX) " bytes.");
+diff --git a/src/journal-remote/journal-remote.c b/src/journal-remote/journal-remote.c
+index beb75a1cb4..67e3a70c06 100644
+--- a/src/journal-remote/journal-remote.c
++++ b/src/journal-remote/journal-remote.c
+@@ -408,7 +408,10 @@ int journal_remote_handle_raw_source(
+ log_debug("%zu active sources remaining", s->active);
+ return 0;
+ } else if (r == -E2BIG) {
+- log_notice_errno(E2BIG, "Entry too big, skipped");
++ log_notice("Entry with too many fields, skipped");
++ return 1;
++ } else if (r == -ENOBUFS) {
++ log_notice("Entry too big, skipped");
+ return 1;
+ } else if (r == -EAGAIN) {
+ return 0;
+diff --git a/src/journal/journald-native.c b/src/journal/journald-native.c
+index 5ff22a10af..951d092053 100644
+--- a/src/journal/journald-native.c
++++ b/src/journal/journald-native.c
+@@ -140,6 +140,11 @@ static int server_process_entry(
+ }
+
+ /* A property follows */
++ if (n > ENTRY_FIELD_COUNT_MAX) {
++ log_debug("Received an entry that has more than " STRINGIFY(ENTRY_FIELD_COUNT_MAX) " fields, ignoring entry.");
++ r = 1;
++ goto finish;
++ }
+
+ /* n existing properties, 1 new, +1 for _TRANSPORT */
+ if (!GREEDY_REALLOC(iovec, m,
+--
+2.11.0
+
diff --git a/poky/meta/recipes-core/systemd/systemd/0026-journal-fix-out-of-bounds-read-CVE-2018-16866.patch b/poky/meta/recipes-core/systemd/systemd/0026-journal-fix-out-of-bounds-read-CVE-2018-16866.patch
new file mode 100644
index 000000000..3925a4abb
--- /dev/null
+++ b/poky/meta/recipes-core/systemd/systemd/0026-journal-fix-out-of-bounds-read-CVE-2018-16866.patch
@@ -0,0 +1,49 @@
+From ebd06c37d4311db9851f4d3fdd023de3dd590de0 Mon Sep 17 00:00:00 2001
+From: Filipe Brandenburger <filbranden@google.com>
+Date: Thu, 10 Jan 2019 14:53:33 -0800
+Subject: [PATCH] journal: fix out-of-bounds read CVE-2018-16866
+
+The original code didn't account for the fact that strchr() would match on the
+'\0' character, making it read past the end of the buffer if no non-whitespace
+character was present.
+
+This bug was introduced in commit ec5ff4445cca6a which was first released in
+systemd v221 and later fixed in commit 8595102d3ddde6 which was released in
+v240, so versions in the range [v221, v240) are affected.
+
+Patch backported from systemd-stable at f005e73d3723d62a39be661931fcb6347119b52b
+also includes a change from systemd master which removes a heap buffer overflow
+a6aadf4ae0bae185dc4c414d492a4a781c80ffe5.
+
+CVE: CVE-2018-16866
+Upstream-Status: Backport
+Signed-off-by: Marcus Cooper <marcusc@axis.com>
+---
+ src/journal/journald-syslog.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/src/journal/journald-syslog.c b/src/journal/journald-syslog.c
+index 9dea116722..809b318c06 100644
+--- a/src/journal/journald-syslog.c
++++ b/src/journal/journald-syslog.c
+@@ -194,7 +194,7 @@ size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid)
+ e = l;
+ l--;
+
+- if (p[l-1] == ']') {
++ if (l > 0 && p[l-1] == ']') {
+ size_t k = l-1;
+
+ for (;;) {
+@@ -219,7 +219,7 @@ size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid)
+ if (t)
+ *identifier = t;
+
+- if (strchr(WHITESPACE, p[e]))
++ if (p[e] != '\0' && strchr(WHITESPACE, p[e]))
+ e++;
+ *buf = p + e;
+ return e;
+--
+2.11.0
+
diff --git a/poky/meta/recipes-core/systemd/systemd/0026-journal-remote-set-a-limit-on-the-number-of-fields-i.patch b/poky/meta/recipes-core/systemd/systemd/0026-journal-remote-set-a-limit-on-the-number-of-fields-i.patch
deleted file mode 100644
index 104945cc2..000000000
--- a/poky/meta/recipes-core/systemd/systemd/0026-journal-remote-set-a-limit-on-the-number-of-fields-i.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From 4183ec3a135663128834ca8b35d50a60999343a7 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl>
-Date: Fri, 7 Dec 2018 10:48:10 +0100
-Subject: [PATCH] journal-remote: set a limit on the number of fields in a
- message
-
-Existing use of E2BIG is replaced with ENOBUFS (entry too long), and E2BIG is
-reused for the new error condition (too many fields).
-
-This matches the change done for systemd-journald, hence forming the second
-part of the fix for CVE-2018-16865
-(https://bugzilla.redhat.com/show_bug.cgi?id=1653861).
-
-Patch backported from systemd master at
-ef4d6abe7c7fab6cbff975b32e76b09feee56074.
----
- src/basic/journal-importer.c | 5 ++++-
- src/journal-remote/journal-remote-main.c | 10 ++++++----
- src/journal-remote/journal-remote.c | 5 ++++-
- 3 files changed, 14 insertions(+), 6 deletions(-)
-
-diff --git a/src/basic/journal-importer.c b/src/basic/journal-importer.c
-index ca203bbbfc..3ac55a66d9 100644
---- a/src/basic/journal-importer.c
-+++ b/src/basic/journal-importer.c
-@@ -23,6 +23,9 @@ enum {
- };
-
- static int iovw_put(struct iovec_wrapper *iovw, void* data, size_t len) {
-+ if (iovw->count >= ENTRY_FIELD_COUNT_MAX)
-+ return -E2BIG;
-+
- if (!GREEDY_REALLOC(iovw->iovec, iovw->size_bytes, iovw->count + 1))
- return log_oom();
-
-@@ -98,7 +101,7 @@ static int get_line(JournalImporter *imp, char **line, size_t *size) {
- imp->scanned = imp->filled;
- if (imp->scanned >= DATA_SIZE_MAX) {
- log_error("Entry is bigger than %u bytes.", DATA_SIZE_MAX);
-- return -E2BIG;
-+ return -ENOBUFS;
- }
-
- if (imp->passive_fd)
-diff --git a/src/journal-remote/journal-remote-main.c b/src/journal-remote/journal-remote-main.c
-index 8fda9d1499..f52618fb7b 100644
---- a/src/journal-remote/journal-remote-main.c
-+++ b/src/journal-remote/journal-remote-main.c
-@@ -212,10 +212,12 @@ static int process_http_upload(
- break;
- else if (r < 0) {
- log_warning("Failed to process data for connection %p", connection);
-- if (r == -E2BIG)
-- return mhd_respondf(connection,
-- r, MHD_HTTP_PAYLOAD_TOO_LARGE,
-- "Entry is too large, maximum is " STRINGIFY(DATA_SIZE_MAX) " bytes.");
-+ if (r == -ENOBUFS)
-+ log_warning_errno(r, "Entry is above the maximum of %u, aborting connection %p.",
-+ DATA_SIZE_MAX, connection);
-+ else if (r == -E2BIG)
-+ log_warning_errno(r, "Entry with more fields than the maximum of %u, aborting connection %p.",
-+ ENTRY_FIELD_COUNT_MAX, connection);
- else
- return mhd_respondf(connection,
- r, MHD_HTTP_UNPROCESSABLE_ENTITY,
-diff --git a/src/journal-remote/journal-remote.c b/src/journal-remote/journal-remote.c
-index beb75a1cb4..67e3a70c06 100644
---- a/src/journal-remote/journal-remote.c
-+++ b/src/journal-remote/journal-remote.c
-@@ -408,7 +408,10 @@ int journal_remote_handle_raw_source(
- log_debug("%zu active sources remaining", s->active);
- return 0;
- } else if (r == -E2BIG) {
-- log_notice_errno(E2BIG, "Entry too big, skipped");
-+ log_notice("Entry with too many fields, skipped");
-+ return 1;
-+ } else if (r == -ENOBUFS) {
-+ log_notice("Entry too big, skipped");
- return 1;
- } else if (r == -EAGAIN) {
- return 0;
---
-2.11.0
-
diff --git a/poky/meta/recipes-core/systemd/systemd/0027-journal-fix-syslog_parse_identifier.patch b/poky/meta/recipes-core/systemd/systemd/0027-journal-fix-syslog_parse_identifier.patch
deleted file mode 100644
index d4df0e12f..000000000
--- a/poky/meta/recipes-core/systemd/systemd/0027-journal-fix-syslog_parse_identifier.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From 8ccebb04e07628f7fe10131d6cd4f19d6a0d8f45 Mon Sep 17 00:00:00 2001
-From: Yu Watanabe <watanabe.yu+github@gmail.com>
-Date: Wed, 8 Aug 2018 15:06:36 +0900
-Subject: [PATCH] journal: fix syslog_parse_identifier()
-
-Fixes #9829.
-
-An out of bounds read was discovered in systemd-journald in the way it
-parses log messages that terminate with a colon ':'. A local attacker
-can use this flaw to disclose process memory data.
-
-Patch backported from systemd master at
-a6aadf4ae0bae185dc4c414d492a4a781c80ffe5.
-
-This matches the change done for systemd-journald, hence forming the first
-part of the fix for CVE-2018-16866.
----
- src/journal/journald-syslog.c | 6 +++---
- src/journal/test-journal-syslog.c | 10 ++++++++--
- 2 files changed, 11 insertions(+), 5 deletions(-)
-
-diff --git a/src/journal/journald-syslog.c b/src/journal/journald-syslog.c
-index 9dea116722..97711ac7a3 100644
---- a/src/journal/journald-syslog.c
-+++ b/src/journal/journald-syslog.c
-@@ -194,7 +194,7 @@ size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid)
- e = l;
- l--;
-
-- if (p[l-1] == ']') {
-+ if (l > 0 && p[l-1] == ']') {
- size_t k = l-1;
-
- for (;;) {
-@@ -219,8 +219,8 @@ size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid)
- if (t)
- *identifier = t;
-
-- if (strchr(WHITESPACE, p[e]))
-- e++;
-+ e += strspn(p + e, WHITESPACE);
-+
- *buf = p + e;
- return e;
- }
-diff --git a/src/journal/test-journal-syslog.c b/src/journal/test-journal-syslog.c
-index 9ba86f6c8a..05f759817e 100644
---- a/src/journal/test-journal-syslog.c
-+++ b/src/journal/test-journal-syslog.c
-@@ -5,8 +5,8 @@
- #include "macro.h"
- #include "string-util.h"
-
--static void test_syslog_parse_identifier(const char* str,
-- const char *ident, const char*pid, int ret) {
-+static void test_syslog_parse_identifier(const char *str,
-+ const char *ident, const char *pid, int ret) {
- const char *buf = str;
- _cleanup_free_ char *ident2 = NULL, *pid2 = NULL;
- int ret2;
-@@ -21,7 +21,13 @@ static void test_syslog_parse_identifier(const char* str,
- int main(void) {
- test_syslog_parse_identifier("pidu[111]: xxx", "pidu", "111", 11);
- test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, 6);
-+ test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, 7);
- test_syslog_parse_identifier("pidu xxx", NULL, NULL, 0);
-+ test_syslog_parse_identifier(":", "", NULL, 1);
-+ test_syslog_parse_identifier(": ", "", NULL, 3);
-+ test_syslog_parse_identifier("pidu:", "pidu", NULL, 5);
-+ test_syslog_parse_identifier("pidu: ", "pidu", NULL, 6);
-+ test_syslog_parse_identifier("pidu : ", NULL, NULL, 0);
-
- return 0;
- }
---
-2.11.0
-
diff --git a/poky/meta/recipes-core/systemd/systemd/0028-journal-do-not-remove-multiple-spaces-after-identifi.patch b/poky/meta/recipes-core/systemd/systemd/0028-journal-do-not-remove-multiple-spaces-after-identifi.patch
deleted file mode 100644
index fa2c01034..000000000
--- a/poky/meta/recipes-core/systemd/systemd/0028-journal-do-not-remove-multiple-spaces-after-identifi.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From c3a7da1bbb6d2df8ab7ea1c7ce34ded37a21959f Mon Sep 17 00:00:00 2001
-From: Yu Watanabe <watanabe.yu+github@gmail.com>
-Date: Fri, 10 Aug 2018 11:07:54 +0900
-Subject: [PATCH] journal: do not remove multiple spaces after identifier in
- syslog message
-
-Single space is used as separator.
-C.f. discussions in #156.
-
-Fixes #9839 introduced by a6aadf4ae0bae185dc4c414d492a4a781c80ffe5.
-
-Patch backported from systemd master at
-8595102d3ddde6d25c282f965573a6de34ab4421.
-
-This matches the change done for systemd-journald, hence forming the second
-part of the fix for CVE-2018-16866
----
- src/journal/journald-syslog.c | 4 +++-
- src/journal/test-journal-syslog.c | 24 ++++++++++++++----------
- 2 files changed, 17 insertions(+), 11 deletions(-)
-
-diff --git a/src/journal/journald-syslog.c b/src/journal/journald-syslog.c
-index 97711ac7a3..e0b55cc566 100644
---- a/src/journal/journald-syslog.c
-+++ b/src/journal/journald-syslog.c
-@@ -219,7 +219,9 @@ size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid)
- if (t)
- *identifier = t;
-
-- e += strspn(p + e, WHITESPACE);
-+ /* Single space is used as separator */
-+ if (p[e] != '\0' && strchr(WHITESPACE, p[e]))
-+ e++;
-
- *buf = p + e;
- return e;
-diff --git a/src/journal/test-journal-syslog.c b/src/journal/test-journal-syslog.c
-index 05f759817e..7294cde032 100644
---- a/src/journal/test-journal-syslog.c
-+++ b/src/journal/test-journal-syslog.c
-@@ -6,7 +6,7 @@
- #include "string-util.h"
-
- static void test_syslog_parse_identifier(const char *str,
-- const char *ident, const char *pid, int ret) {
-+ const char *ident, const char *pid, const char *rest, int ret) {
- const char *buf = str;
- _cleanup_free_ char *ident2 = NULL, *pid2 = NULL;
- int ret2;
-@@ -16,18 +16,22 @@ static void test_syslog_parse_identifier(const char *str,
- assert_se(ret == ret2);
- assert_se(ident == ident2 || streq_ptr(ident, ident2));
- assert_se(pid == pid2 || streq_ptr(pid, pid2));
-+ assert_se(streq(buf, rest));
- }
-
- int main(void) {
-- test_syslog_parse_identifier("pidu[111]: xxx", "pidu", "111", 11);
-- test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, 6);
-- test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, 7);
-- test_syslog_parse_identifier("pidu xxx", NULL, NULL, 0);
-- test_syslog_parse_identifier(":", "", NULL, 1);
-- test_syslog_parse_identifier(": ", "", NULL, 3);
-- test_syslog_parse_identifier("pidu:", "pidu", NULL, 5);
-- test_syslog_parse_identifier("pidu: ", "pidu", NULL, 6);
-- test_syslog_parse_identifier("pidu : ", NULL, NULL, 0);
-+ test_syslog_parse_identifier("pidu[111]: xxx", "pidu", "111", "xxx", 11);
-+ test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, "xxx", 6);
-+ test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, " xxx", 6);
-+ test_syslog_parse_identifier("pidu xxx", NULL, NULL, "pidu xxx", 0);
-+ test_syslog_parse_identifier(" pidu xxx", NULL, NULL, " pidu xxx", 0);
-+ test_syslog_parse_identifier("", NULL, NULL, "", 0);
-+ test_syslog_parse_identifier(" ", NULL, NULL, " ", 0);
-+ test_syslog_parse_identifier(":", "", NULL, "", 1);
-+ test_syslog_parse_identifier(": ", "", NULL, " ", 2);
-+ test_syslog_parse_identifier("pidu:", "pidu", NULL, "", 5);
-+ test_syslog_parse_identifier("pidu: ", "pidu", NULL, "", 6);
-+ test_syslog_parse_identifier("pidu : ", NULL, NULL, "pidu : ", 0);
-
- return 0;
- }
---
-2.11.0
-
diff --git a/poky/meta/recipes-core/systemd/systemd/CVE-2019-6454.patch b/poky/meta/recipes-core/systemd/systemd/CVE-2019-6454.patch
new file mode 100644
index 000000000..80170dac0
--- /dev/null
+++ b/poky/meta/recipes-core/systemd/systemd/CVE-2019-6454.patch
@@ -0,0 +1,210 @@
+Description: sd-bus: enforce a size limit for dbus paths, and don't allocate
+ them on the stacka
+Forwarded: no
+
+Patch from: systemd_239-7ubuntu10.8
+
+For information see:
+https://usn.ubuntu.com/3891-1/
+https://git.launchpad.net/ubuntu/+source/systemd/commit/?id=f8e75d5634904c8e672658856508c3a02f349adb
+
+CVE: CVE-2019-6454
+Upstream-Status: Backport
+
+Signed-off-by: George McCollister <george.mccollister@gmail.com>
+
+--- a/src/libsystemd/sd-bus/bus-internal.c
++++ b/src/libsystemd/sd-bus/bus-internal.c
+@@ -45,7 +45,7 @@
+ if (slash)
+ return false;
+
+- return true;
++ return (q - p) <= BUS_PATH_SIZE_MAX;
+ }
+
+ char* object_path_startswith(const char *a, const char *b) {
+--- a/src/libsystemd/sd-bus/bus-internal.h
++++ b/src/libsystemd/sd-bus/bus-internal.h
+@@ -333,6 +333,10 @@
+
+ #define BUS_MESSAGE_SIZE_MAX (128*1024*1024)
+ #define BUS_AUTH_SIZE_MAX (64*1024)
++/* Note that the D-Bus specification states that bus paths shall have no size limit. We enforce here one
++ * anyway, since truly unbounded strings are a security problem. The limit we pick is relatively large however,
++ * to not clash unnecessarily with real-life applications. */
++#define BUS_PATH_SIZE_MAX (64*1024)
+
+ #define BUS_CONTAINER_DEPTH 128
+
+--- a/src/libsystemd/sd-bus/bus-objects.c
++++ b/src/libsystemd/sd-bus/bus-objects.c
+@@ -1134,7 +1134,8 @@
+ const char *path,
+ sd_bus_error *error) {
+
+- char *prefix;
++ _cleanup_free_ char *prefix = NULL;
++ size_t pl;
+ int r;
+
+ assert(bus);
+@@ -1150,7 +1151,12 @@
+ return 0;
+
+ /* Second, add fallback vtables registered for any of the prefixes */
+- prefix = alloca(strlen(path) + 1);
++ pl = strlen(path);
++ assert(pl <= BUS_PATH_SIZE_MAX);
++ prefix = new(char, pl + 1);
++ if (!prefix)
++ return -ENOMEM;
++
+ OBJECT_PATH_FOREACH_PREFIX(prefix, path) {
+ r = object_manager_serialize_path(bus, reply, prefix, path, true, error);
+ if (r < 0)
+@@ -1346,6 +1352,7 @@
+ }
+
+ int bus_process_object(sd_bus *bus, sd_bus_message *m) {
++ _cleanup_free_ char *prefix = NULL;
+ int r;
+ size_t pl;
+ bool found_object = false;
+@@ -1370,9 +1377,12 @@
+ assert(m->member);
+
+ pl = strlen(m->path);
+- do {
+- char prefix[pl+1];
++ assert(pl <= BUS_PATH_SIZE_MAX);
++ prefix = new(char, pl + 1);
++ if (!prefix)
++ return -ENOMEM;
+
++ do {
+ bus->nodes_modified = false;
+
+ r = object_find_and_run(bus, m, m->path, false, &found_object);
+@@ -1499,9 +1509,15 @@
+
+ n = hashmap_get(bus->nodes, path);
+ if (!n) {
+- char *prefix;
++ _cleanup_free_ char *prefix = NULL;
++ size_t pl;
++
++ pl = strlen(path);
++ assert(pl <= BUS_PATH_SIZE_MAX);
++ prefix = new(char, pl + 1);
++ if (!prefix)
++ return -ENOMEM;
+
+- prefix = alloca(strlen(path) + 1);
+ OBJECT_PATH_FOREACH_PREFIX(prefix, path) {
+ n = hashmap_get(bus->nodes, prefix);
+ if (n)
+@@ -2091,8 +2107,9 @@
+ char **names) {
+
+ BUS_DONT_DESTROY(bus);
++ _cleanup_free_ char *prefix = NULL;
+ bool found_interface = false;
+- char *prefix;
++ size_t pl;
+ int r;
+
+ assert_return(bus, -EINVAL);
+@@ -2111,6 +2128,12 @@
+ if (names && names[0] == NULL)
+ return 0;
+
++ pl = strlen(path);
++ assert(pl <= BUS_PATH_SIZE_MAX);
++ prefix = new(char, pl + 1);
++ if (!prefix)
++ return -ENOMEM;
++
+ do {
+ bus->nodes_modified = false;
+
+@@ -2120,7 +2143,6 @@
+ if (bus->nodes_modified)
+ continue;
+
+- prefix = alloca(strlen(path) + 1);
+ OBJECT_PATH_FOREACH_PREFIX(prefix, path) {
+ r = emit_properties_changed_on_interface(bus, prefix, path, interface, true, &found_interface, names);
+ if (r != 0)
+@@ -2252,7 +2274,8 @@
+
+ static int object_added_append_all(sd_bus *bus, sd_bus_message *m, const char *path) {
+ _cleanup_set_free_ Set *s = NULL;
+- char *prefix;
++ _cleanup_free_ char *prefix = NULL;
++ size_t pl;
+ int r;
+
+ assert(bus);
+@@ -2297,7 +2320,12 @@
+ if (bus->nodes_modified)
+ return 0;
+
+- prefix = alloca(strlen(path) + 1);
++ pl = strlen(path);
++ assert(pl <= BUS_PATH_SIZE_MAX);
++ prefix = new(char, pl + 1);
++ if (!prefix)
++ return -ENOMEM;
++
+ OBJECT_PATH_FOREACH_PREFIX(prefix, path) {
+ r = object_added_append_all_prefix(bus, m, s, prefix, path, true);
+ if (r < 0)
+@@ -2436,7 +2464,8 @@
+
+ static int object_removed_append_all(sd_bus *bus, sd_bus_message *m, const char *path) {
+ _cleanup_set_free_ Set *s = NULL;
+- char *prefix;
++ _cleanup_free_ char *prefix = NULL;
++ size_t pl;
+ int r;
+
+ assert(bus);
+@@ -2468,7 +2497,12 @@
+ if (bus->nodes_modified)
+ return 0;
+
+- prefix = alloca(strlen(path) + 1);
++ pl = strlen(path);
++ assert(pl <= BUS_PATH_SIZE_MAX);
++ prefix = new(char, pl + 1);
++ if (!prefix)
++ return -ENOMEM;
++
+ OBJECT_PATH_FOREACH_PREFIX(prefix, path) {
+ r = object_removed_append_all_prefix(bus, m, s, prefix, path, true);
+ if (r < 0)
+@@ -2618,7 +2652,8 @@
+ const char *path,
+ const char *interface) {
+
+- char *prefix;
++ _cleanup_free_ char *prefix = NULL;
++ size_t pl;
+ int r;
+
+ assert(bus);
+@@ -2632,7 +2667,12 @@
+ if (bus->nodes_modified)
+ return 0;
+
+- prefix = alloca(strlen(path) + 1);
++ pl = strlen(path);
++ assert(pl <= BUS_PATH_SIZE_MAX);
++ prefix = new(char, pl + 1);
++ if (!prefix)
++ return -ENOMEM;
++
+ OBJECT_PATH_FOREACH_PREFIX(prefix, path) {
+ r = interfaces_added_append_one_prefix(bus, m, prefix, path, interface, true);
+ if (r != 0)
diff --git a/poky/meta/recipes-core/systemd/systemd/sd-bus-if-we-receive-an-invalid-dbus-message-ignore-.patch b/poky/meta/recipes-core/systemd/systemd/sd-bus-if-we-receive-an-invalid-dbus-message-ignore-.patch
new file mode 100644
index 000000000..57311faa6
--- /dev/null
+++ b/poky/meta/recipes-core/systemd/systemd/sd-bus-if-we-receive-an-invalid-dbus-message-ignore-.patch
@@ -0,0 +1,61 @@
+Description: sd-bus: if we receive an invalid dbus message, ignore and
+ proceeed
+ .
+ dbus-daemon might have a slightly different idea of what a valid msg is
+ than us (for example regarding valid msg and field sizes). Let's hence
+ try to proceed if we can and thus drop messages rather than fail the
+ connection if we fail to validate a message.
+ .
+ Hopefully the differences in what is considered valid are not visible
+ for real-life usecases, but are specific to exploit attempts only.
+Author: Lennart Poettering <lennart@poettering.net>
+Forwarded: other,https://github.com/systemd/systemd/pull/11708/
+
+Patch from: systemd_239-7ubuntu10.8
+
+For information see:
+https://usn.ubuntu.com/3891-1/
+https://git.launchpad.net/ubuntu/+source/systemd/commit/?id=f8e75d5634904c8e672658856508c3a02f349adb
+
+CVE: CVE-2019-6454
+Upstream-Status: Backport
+
+Signed-off-by: George McCollister <george.mccollister@gmail.com>
+
+diff --git a/src/libsystemd/sd-bus/bus-socket.c b/src/libsystemd/sd-bus/bus-socket.c
+index 30d6455b6f..441b4a816f 100644
+--- a/src/libsystemd/sd-bus/bus-socket.c
++++ b/src/libsystemd/sd-bus/bus-socket.c
+@@ -1072,7 +1072,7 @@ static int bus_socket_read_message_need(sd_bus *bus, size_t *need) {
+ }
+
+ static int bus_socket_make_message(sd_bus *bus, size_t size) {
+- sd_bus_message *t;
++ sd_bus_message *t = NULL;
+ void *b;
+ int r;
+
+@@ -1097,7 +1097,9 @@ static int bus_socket_make_message(sd_bus *bus, size_t size) {
+ bus->fds, bus->n_fds,
+ NULL,
+ &t);
+- if (r < 0) {
++ if (r == -EBADMSG)
++ log_debug_errno(r, "Received invalid message from connection %s, dropping.", strna(bus->description));
++ else if (r < 0) {
+ free(b);
+ return r;
+ }
+@@ -1108,7 +1110,8 @@ static int bus_socket_make_message(sd_bus *bus, size_t size) {
+ bus->fds = NULL;
+ bus->n_fds = 0;
+
+- bus->rqueue[bus->rqueue_size++] = t;
++ if (t)
++ bus->rqueue[bus->rqueue_size++] = t;
+
+ return 1;
+ }
+--
+2.17.1
+
diff --git a/poky/meta/recipes-core/systemd/systemd_239.bb b/poky/meta/recipes-core/systemd/systemd_239.bb
index 6fbef4716..7fbd64ced 100644
--- a/poky/meta/recipes-core/systemd/systemd_239.bb
+++ b/poky/meta/recipes-core/systemd/systemd_239.bb
@@ -39,10 +39,10 @@ SRC_URI += "file://touchscreen.rules \
file://0002-core-Fix-use-after-free-case-in-load_from_path.patch \
file://0001-meson-rename-Ddebug-to-Ddebug-extra.patch \
file://0024-journald-do-not-store-the-iovec-entry-for-process-co.patch \
- file://0025-journald-set-a-limit-on-the-number-of-fields-1k.patch \
- file://0026-journal-remote-set-a-limit-on-the-number-of-fields-i.patch \
- file://0027-journal-fix-syslog_parse_identifier.patch \
- file://0028-journal-do-not-remove-multiple-spaces-after-identifi.patch \
+ file://0025-journald-set-a-limit-on-the-number-of-fields.patch \
+ file://0026-journal-fix-out-of-bounds-read-CVE-2018-16866.patch \
+ file://CVE-2019-6454.patch \
+ file://sd-bus-if-we-receive-an-invalid-dbus-message-ignore-.patch \
"
# patches made for musl are only applied on TCLIBC is musl
@@ -559,7 +559,7 @@ FILES_${PN} = " ${base_bindir}/* \
FILES_${PN}-dev += "${base_libdir}/security/*.la ${datadir}/dbus-1/interfaces/ ${sysconfdir}/rpm/macros.systemd"
-RDEPENDS_${PN} += "kmod dbus util-linux-mount udev (= ${EXTENDPKGV}) util-linux-agetty util-linux-fsck"
+RDEPENDS_${PN} += "kmod dbus util-linux-mount util-linux-umount udev (= ${EXTENDPKGV}) util-linux-agetty util-linux-fsck"
RDEPENDS_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'serial-getty-generator', '', 'systemd-serialgetty', d)}"
RDEPENDS_${PN} += "volatile-binds update-rc.d systemd-conf"
diff --git a/poky/meta/recipes-extended/pam/libpam_1.3.0.bb b/poky/meta/recipes-extended/pam/libpam_1.3.0.bb
index 3aec2cdb4..cc1241020 100644
--- a/poky/meta/recipes-extended/pam/libpam_1.3.0.bb
+++ b/poky/meta/recipes-extended/pam/libpam_1.3.0.bb
@@ -7,7 +7,9 @@ SECTION = "base"
# /etc/pam.d comes from Debian libpam-runtime in 2009-11 (at that time
# libpam-runtime-1.0.1 is GPLv2+), by openembedded
LICENSE = "GPLv2+ | BSD"
-LIC_FILES_CHKSUM = "file://COPYING;md5=7eb5c1bf854e8881005d673599ee74d3"
+LIC_FILES_CHKSUM = "file://COPYING;md5=7eb5c1bf854e8881005d673599ee74d3 \
+ file://libpamc/License;md5=a4da476a14c093fdc73be3c3c9ba8fb3 \
+ "
SRC_URI = "http://linux-pam.org/library/Linux-PAM-${PV}.tar.bz2 \
file://99_pam \
diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-net-expose-sk-wmem-in-sock_exceed_buf_limit-trac.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-net-expose-sk-wmem-in-sock_exceed_buf_limit-trac.patch
deleted file mode 100644
index 92e12df17..000000000
--- a/poky/meta/recipes-kernel/lttng/lttng-modules/0001-Fix-net-expose-sk-wmem-in-sock_exceed_buf_limit-trac.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From 9e67b4c94b94493123d38379bd9b3eceae23a6f1 Mon Sep 17 00:00:00 2001
-From: Michael Jeanson <mjeanson@efficios.com>
-Date: Fri, 7 Sep 2018 12:21:12 -0400
-Subject: [PATCH] Fix: net: expose sk wmem in sock_exceed_buf_limit tracepoint
- (4.19)
-
-See upstream commit:
-
- commit d6f19938eb031ee2158272757db33258153ae59c
- Author: Yafang Shao <laoar.shao@gmail.com>
- Date: Sun Jul 1 23:31:30 2018 +0800
-
- net: expose sk wmem in sock_exceed_buf_limit tracepoint
-
- Currently trace_sock_exceed_buf_limit() only show rmem info,
- but wmem limit may also be hit.
- So expose wmem info in this tracepoint as well.
-
- Regarding memcg, I think it is better to introduce a new tracepoint(if
- that is needed), i.e. trace_memcg_limit_hit other than show memcg info in
- trace_sock_exceed_buf_limit.
-
-Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-Upstream-Status: Backport
-Signed-off-by: He Zhe <zhe.he@windriver.com>
----
- instrumentation/events/lttng-module/sock.h | 23 ++++++++++++++++++++++-
- 1 file changed, 22 insertions(+), 1 deletion(-)
-
-diff --git a/instrumentation/events/lttng-module/sock.h b/instrumentation/events/lttng-module/sock.h
-index 5cd02ca..cd0c92b 100644
---- a/instrumentation/events/lttng-module/sock.h
-+++ b/instrumentation/events/lttng-module/sock.h
-@@ -21,7 +21,28 @@ LTTNG_TRACEPOINT_EVENT(sock_rcvqueue_full,
- )
- )
-
--#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
-+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0))
-+
-+LTTNG_TRACEPOINT_EVENT(sock_exceed_buf_limit,
-+
-+ TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
-+
-+ TP_ARGS(sk, prot, allocated, kind),
-+
-+ TP_FIELDS(
-+ ctf_string(name, prot->name)
-+ ctf_array(long, sysctl_mem, prot->sysctl_mem, 3)
-+ ctf_integer(long, allocated, allocated)
-+ ctf_integer(int, sysctl_rmem, sk_get_rmem0(sk, prot))
-+ ctf_integer(int, rmem_alloc, atomic_read(&sk->sk_rmem_alloc))
-+ ctf_integer(int, sysctl_wmem, sk_get_wmem0(sk, prot))
-+ ctf_integer(int, wmem_alloc, refcount_read(&sk->sk_wmem_alloc))
-+ ctf_integer(int, wmem_queued, sk->sk_wmem_queued)
-+ ctf_integer(int, kind, kind)
-+ )
-+)
-+
-+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
-
- LTTNG_TRACEPOINT_EVENT(sock_exceed_buf_limit,
-
---
-2.7.4
-
diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules_2.10.7.bb b/poky/meta/recipes-kernel/lttng/lttng-modules_2.10.9.bb
index f6c865a87..d29737775 100644
--- a/poky/meta/recipes-kernel/lttng/lttng-modules_2.10.7.bb
+++ b/poky/meta/recipes-kernel/lttng/lttng-modules_2.10.9.bb
@@ -15,11 +15,10 @@ COMPATIBLE_HOST = '(x86_64|i.86|powerpc|aarch64|mips|nios2|arm).*-linux'
SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \
file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \
file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \
- file://0001-Fix-net-expose-sk-wmem-in-sock_exceed_buf_limit-trac.patch \
"
-SRC_URI[md5sum] = "d3cb4520948083bf1573a2e4cb7406aa"
-SRC_URI[sha256sum] = "f049428d3d131e103a7a7038d184731bf7bcdce00503fc19a2c9b5693ecbb3b5"
+SRC_URI[md5sum] = "09df0ac2e8f245740a2f32411d10c0d1"
+SRC_URI[sha256sum] = "a1855bbd02d0f71ebd180e9872309862036624f012442ab9cc5852eb60340145"
export INSTALL_MOD_DIR="kernel/lttng-modules"
diff --git a/poky/meta/recipes-kernel/lttng/lttng-tools/0001-Allow-multiple-attempts-to-connect-to-relayd.patch b/poky/meta/recipes-kernel/lttng/lttng-tools/0001-Allow-multiple-attempts-to-connect-to-relayd.patch
index 62a097859..0998fc386 100644
--- a/poky/meta/recipes-kernel/lttng/lttng-tools/0001-Allow-multiple-attempts-to-connect-to-relayd.patch
+++ b/poky/meta/recipes-kernel/lttng/lttng-tools/0001-Allow-multiple-attempts-to-connect-to-relayd.patch
@@ -16,17 +16,17 @@ Signed-off-by: Mikael Beckius <mikael.beckius@windriver.com>
Signed-off-by: He Zhe <zhe.he@windriver.com>
Upstream-Status: Pending
---
- src/bin/lttng-sessiond/cmd.c | 8 --------
+ src/bin/lttng-sessiond/cmd.c | 8 --------
1 file changed, 8 deletions(-)
diff --git a/src/bin/lttng-sessiond/cmd.c b/src/bin/lttng-sessiond/cmd.c
-index 73b4ce3..36f62ee 100644
+index cf30b8e..cc41a48 100644
--- a/src/bin/lttng-sessiond/cmd.c
+++ b/src/bin/lttng-sessiond/cmd.c
-@@ -689,14 +689,6 @@ close_sock:
- free(rsock);
+@@ -945,14 +945,6 @@ static int send_consumer_relayd_socket(enum lttng_domain_type domain,
+ */
- error:
+ close_sock:
- if (ret != LTTNG_OK) {
- /*
- * The consumer output for this session should not be used anymore
@@ -35,9 +35,8 @@ index 73b4ce3..36f62ee 100644
- */
- consumer->enabled = 0;
- }
- return ret;
- }
+ (void) relayd_close(rsock);
+ free(rsock);
--
-1.7.9.5
-
+2.17.1
diff --git a/poky/meta/recipes-kernel/lttng/lttng-tools_2.9.5.bb b/poky/meta/recipes-kernel/lttng/lttng-tools_2.9.11.bb
index 0314b5363..5e3fc1aab 100644
--- a/poky/meta/recipes-kernel/lttng/lttng-tools_2.9.5.bb
+++ b/poky/meta/recipes-kernel/lttng/lttng-tools_2.9.11.bb
@@ -34,8 +34,8 @@ SRC_URI = "https://lttng.org/files/lttng-tools/lttng-tools-${PV}.tar.bz2 \
file://lttng-sessiond.service \
"
-SRC_URI[md5sum] = "051224eb991aee07f8721ff1877d0b96"
-SRC_URI[sha256sum] = "77839eb6fc6c652125f08acfd9369701c2516eb05cc2084160e7efc7a3fb731c"
+SRC_URI[md5sum] = "f9c2b35810790f5bd802483eb14cb301"
+SRC_URI[sha256sum] = "2c45144acf8dc6fcd655be7370a022e9c03c8b7419af489c9c2e786a335006db"
inherit autotools ptest pkgconfig useradd python3-dir manpages systemd
diff --git a/poky/meta/recipes-kernel/lttng/lttng-ust_2.10.1.bb b/poky/meta/recipes-kernel/lttng/lttng-ust_2.10.3.bb
index d79a47931..b5c43200d 100644
--- a/poky/meta/recipes-kernel/lttng/lttng-ust_2.10.1.bb
+++ b/poky/meta/recipes-kernel/lttng/lttng-ust_2.10.3.bb
@@ -23,8 +23,8 @@ PE = "2"
SRC_URI = "https://lttng.org/files/lttng-ust/lttng-ust-${PV}.tar.bz2 \
file://lttng-ust-doc-examples-disable.patch \
"
-SRC_URI[md5sum] = "4863cc2f9f0a070b42438bb646bbba06"
-SRC_URI[sha256sum] = "07cc3c0b71e7b77f1913d5b7f340a78a9af414440e4662712aef2d635b88ee9d"
+SRC_URI[md5sum] = "ffcfa8c1ba9a52f002d240e936e9afa2"
+SRC_URI[sha256sum] = "9e8420f90d5f963f7aa32bc6d44adc1e491136f687c69ffb7a3075d33b40852b"
CVE_PRODUCT = "ust"
diff --git a/poky/meta/recipes-multimedia/libpng/libpng/CVE-2019-7317.patch b/poky/meta/recipes-multimedia/libpng/libpng/CVE-2019-7317.patch
new file mode 100644
index 000000000..6ee1f8da3
--- /dev/null
+++ b/poky/meta/recipes-multimedia/libpng/libpng/CVE-2019-7317.patch
@@ -0,0 +1,20 @@
+Use-after-free detected with static analysis.
+
+CVE: CVE-2019-7317
+Upstream-Status: Submitted [https://github.com/glennrp/libpng/issues/275]
+Signed-off-by: Ross Burton <ross.burton@intel.com>
+
+diff --git a/png.c b/png.c
+index 9d9926f638..efd1aecfbd 100644
+--- a/png.c
++++ b/png.c
+@@ -4588,8 +4588,7 @@ png_image_free(png_imagep image)
+ if (image != NULL && image->opaque != NULL &&
+ image->opaque->error_buf == NULL)
+ {
+- /* Ignore errors here: */
+- (void)png_safe_execute(image, png_image_free_function, image);
++ png_image_free_function(image);
+ image->opaque = NULL;
+ }
+ }
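
The one-line change above replaces the png_safe_execute() wrapper with a direct call to png_image_free_function(), because the wrapper still referenced the opaque control structure after the free function had released it. A minimal, purely illustrative C sketch of that pattern follows; the types and functions (struct handle, free_contents, handle_free) are hypothetical and are not libpng APIs.

    /* Hypothetical sketch: call the destructor directly rather than through a
     * guard wrapper that would dereference the opaque state after it is freed. */
    #include <stdlib.h>

    struct ctx { char *error_buf; };
    struct handle { struct ctx *opaque; };

    static void free_contents(struct handle *h) {
            free(h->opaque);   /* releases everything hanging off opaque */
    }

    static void handle_free(struct handle *h) {
            if (h != NULL && h->opaque != NULL && h->opaque->error_buf == NULL) {
                    /* Previously: run_guarded(h, free_contents, h); the guard
                     * touched h->opaque again on return -> use-after-free. */
                    free_contents(h);
                    h->opaque = NULL;
            }
    }

    int main(void) {
            struct handle h = { .opaque = calloc(1, sizeof(struct ctx)) };
            handle_free(&h);
            return 0;
    }
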
diff --git a/poky/meta/recipes-multimedia/libpng/libpng_1.6.36.bb b/poky/meta/recipes-multimedia/libpng/libpng_1.6.36.bb
index 3cf4f7249..a58623788 100644
--- a/poky/meta/recipes-multimedia/libpng/libpng_1.6.36.bb
+++ b/poky/meta/recipes-multimedia/libpng/libpng_1.6.36.bb
@@ -9,7 +9,8 @@ DEPENDS = "zlib"
LIBV = "16"
-SRC_URI = "${SOURCEFORGE_MIRROR}/project/${BPN}/${BPN}${LIBV}/${PV}/${BP}.tar.xz"
+SRC_URI = "${SOURCEFORGE_MIRROR}/project/${BPN}/${BPN}${LIBV}/${PV}/${BP}.tar.xz \
+ file://CVE-2019-7317.patch"
SRC_URI[md5sum] = "df2be2d29c40937fe1f5349b16bc2826"
SRC_URI[sha256sum] = "eceb924c1fa6b79172fdfd008d335f0e59172a86a66481e09d4089df872aa319"
diff --git a/poky/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2018-19432.patch b/poky/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2018-19432.patch
new file mode 100644
index 000000000..8ded2c0f8
--- /dev/null
+++ b/poky/meta/recipes-multimedia/libsndfile/libsndfile1/CVE-2018-19432.patch
@@ -0,0 +1,115 @@
+From 6f3266277bed16525f0ac2f0f03ff4626f1923e5 Mon Sep 17 00:00:00 2001
+From: Erik de Castro Lopo <erikd@mega-nerd.com>
+Date: Thu, 8 Mar 2018 18:00:21 +1100
+Subject: [PATCH] Fix max channel count bug
+
+The code was allowing files to be written with a channel count of exactly
+`SF_MAX_CHANNELS` but was failing to read some file formats with the same
+channel count.
+
+Upstream-Status: Backport [https://github.com/erikd/libsndfile/
+commit/6f3266277bed16525f0ac2f0f03ff4626f1923e5]
+
+CVE: CVE-2018-19432
+
+Signed-off-by: Changqing Li <changqing.li@windriver.com>
+
+---
+ src/aiff.c | 6 +++---
+ src/rf64.c | 4 ++--
+ src/w64.c | 4 ++--
+ src/wav.c | 4 ++--
+ 4 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/src/aiff.c b/src/aiff.c
+index fbd43cb..6386bce 100644
+--- a/src/aiff.c
++++ b/src/aiff.c
+@@ -1,5 +1,5 @@
+ /*
+-** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com>
++** Copyright (C) 1999-2018 Erik de Castro Lopo <erikd@mega-nerd.com>
+ ** Copyright (C) 2005 David Viens <davidv@plogue.com>
+ **
+ ** This program is free software; you can redistribute it and/or modify
+@@ -950,7 +950,7 @@ aiff_read_header (SF_PRIVATE *psf, COMM_
+ if (psf->sf.channels < 1)
+ return SFE_CHANNEL_COUNT_ZERO ;
+
+- if (psf->sf.channels >= SF_MAX_CHANNELS)
++ if (psf->sf.channels > SF_MAX_CHANNELS)
+ return SFE_CHANNEL_COUNT ;
+
+ if (! (found_chunk & HAVE_FORM))
+@@ -1030,7 +1030,7 @@ aiff_read_comm_chunk (SF_PRIVATE *psf, C
+ psf_log_printf (psf, " Sample Rate : %d\n", samplerate) ;
+ psf_log_printf (psf, " Frames : %u%s\n", comm_fmt->numSampleFrames, (comm_fmt->numSampleFrames == 0 && psf->filelength > 104) ? " (Should not be 0)" : "") ;
+
+- if (comm_fmt->numChannels < 1 || comm_fmt->numChannels >= SF_MAX_CHANNELS)
++ if (comm_fmt->numChannels < 1 || comm_fmt->numChannels > SF_MAX_CHANNELS)
+ { psf_log_printf (psf, " Channels : %d (should be >= 1 and < %d)\n", comm_fmt->numChannels, SF_MAX_CHANNELS) ;
+ return SFE_CHANNEL_COUNT_BAD ;
+ } ;
+diff --git a/src/rf64.c b/src/rf64.c
+index d57f0f3..876cd45 100644
+--- a/src/rf64.c
++++ b/src/rf64.c
+@@ -1,5 +1,5 @@
+ /*
+-** Copyright (C) 2008-2017 Erik de Castro Lopo <erikd@mega-nerd.com>
++** Copyright (C) 2008-2018 Erik de Castro Lopo <erikd@mega-nerd.com>
+ ** Copyright (C) 2009 Uli Franke <cls@nebadje.org>
+ **
+ ** This program is free software; you can redistribute it and/or modify
+@@ -382,7 +382,7 @@ rf64_read_header (SF_PRIVATE *psf, int *
+ if (psf->sf.channels < 1)
+ return SFE_CHANNEL_COUNT_ZERO ;
+
+- if (psf->sf.channels >= SF_MAX_CHANNELS)
++ if (psf->sf.channels > SF_MAX_CHANNELS)
+ return SFE_CHANNEL_COUNT ;
+
+ /* WAVs can be little or big endian */
+diff --git a/src/w64.c b/src/w64.c
+index 939b716..a37d2c5 100644
+--- a/src/w64.c
++++ b/src/w64.c
+@@ -1,5 +1,5 @@
+ /*
+-** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com>
++** Copyright (C) 1999-2018 Erik de Castro Lopo <erikd@mega-nerd.com>
+ **
+ ** This program is free software; you can redistribute it and/or modify
+ ** it under the terms of the GNU Lesser General Public License as published by
+@@ -383,7 +383,7 @@ w64_read_header (SF_PRIVATE *psf, int *b
+ if (psf->sf.channels < 1)
+ return SFE_CHANNEL_COUNT_ZERO ;
+
+- if (psf->sf.channels >= SF_MAX_CHANNELS)
++ if (psf->sf.channels > SF_MAX_CHANNELS)
+ return SFE_CHANNEL_COUNT ;
+
+ psf->endian = SF_ENDIAN_LITTLE ; /* All W64 files are little endian. */
+diff --git a/src/wav.c b/src/wav.c
+index 7bd97bc..dc97545 100644
+--- a/src/wav.c
++++ b/src/wav.c
+@@ -1,5 +1,5 @@
+ /*
+-** Copyright (C) 1999-2016 Erik de Castro Lopo <erikd@mega-nerd.com>
++** Copyright (C) 1999-2018 Erik de Castro Lopo <erikd@mega-nerd.com>
+ ** Copyright (C) 2004-2005 David Viens <davidv@plogue.com>
+ **
+ ** This program is free software; you can redistribute it and/or modify
+@@ -627,7 +627,7 @@ wav_read_header (SF_PRIVATE *psf, int *b
+ if (psf->sf.channels < 1)
+ return SFE_CHANNEL_COUNT_ZERO ;
+
+- if (psf->sf.channels >= SF_MAX_CHANNELS)
++ if (psf->sf.channels > SF_MAX_CHANNELS)
+ return SFE_CHANNEL_COUNT ;
+
+ if (format != WAVE_FORMAT_PCM && (parsestage & HAVE_fact) == 0)
+--
+1.7.9.5
+
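
The backport above fixes an off-by-one in the readers' channel-count validation: the writers accept up to and including SF_MAX_CHANNELS, so the readers must reject only counts strictly greater than the limit. A small stand-alone C sketch of that inclusive-range check follows; MAX_CHANNELS and check_channel_count() are illustrative names, not libsndfile symbols.

    /* Hypothetical sketch of the boundary fix above: reject counts below 1 or
     * above the limit, but accept the limit itself. */
    #include <stdio.h>

    #define MAX_CHANNELS 1024   /* illustrative limit, analogous to SF_MAX_CHANNELS */

    enum { OK = 0, ERR_ZERO_CHANNELS = 1, ERR_CHANNEL_COUNT = 2 };

    static int check_channel_count(int channels) {
            if (channels < 1)
                    return ERR_ZERO_CHANNELS;
            if (channels > MAX_CHANNELS)    /* was ">=", rejecting a legal file */
                    return ERR_CHANNEL_COUNT;
            return OK;
    }

    int main(void) {
            printf("%d %d %d\n",
                   check_channel_count(0),                  /* rejected */
                   check_channel_count(MAX_CHANNELS),       /* accepted after the fix */
                   check_channel_count(MAX_CHANNELS + 1));  /* rejected */
            return 0;
    }
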
diff --git a/poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb b/poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
index 13248f5cb..9700f4a6e 100644
--- a/poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
+++ b/poky/meta/recipes-multimedia/libsndfile/libsndfile1_1.0.28.bb
@@ -14,6 +14,7 @@ SRC_URI = "http://www.mega-nerd.com/libsndfile/files/libsndfile-${PV}.tar.gz \
file://CVE-2017-14634.patch \
file://CVE-2018-13139.patch \
file://0001-a-ulaw-fix-multiple-buffer-overflows-432.patch \
+ file://CVE-2018-19432.patch \
"
SRC_URI[md5sum] = "646b5f98ce89ac60cdb060fcd398247c"
diff --git a/poky/meta/recipes-support/ca-certificates/ca-certificates_20180409.bb b/poky/meta/recipes-support/ca-certificates/ca-certificates_20190110.bb
index 0d57083c5..b9f57900c 100644
--- a/poky/meta/recipes-support/ca-certificates/ca-certificates_20180409.bb
+++ b/poky/meta/recipes-support/ca-certificates/ca-certificates_20190110.bb
@@ -14,7 +14,7 @@ DEPENDS_class-nativesdk = "openssl-native"
# Need c_rehash from openssl and run-parts from debianutils
PACKAGE_WRITE_DEPS += "openssl-native debianutils-native"
-SRCREV = "dbbd11e56af93bb79f21d0ee6059a901f83f70a5"
+SRCREV = "c28799b138b044c963d24c4a69659b6e5486e3be"
SRC_URI = "git://salsa.debian.org/debian/ca-certificates.git;protocol=https \
file://0002-update-ca-certificates-use-SYSROOT.patch \
diff --git a/poky/scripts/send-error-report b/poky/scripts/send-error-report
index 3528cf93a..0ed7cc905 100755
--- a/poky/scripts/send-error-report
+++ b/poky/scripts/send-error-report
@@ -62,7 +62,7 @@ def edit_content(json_file_path):
def prepare_data(args):
# attempt to get the max_log_size from the server's settings
- max_log_size = getPayloadLimit("https://"+args.server+"/ClientPost/JSON")
+ max_log_size = getPayloadLimit(args.protocol+args.server+"/ClientPost/JSON")
if not os.path.isfile(args.error_file):
log.error("No data file found.")
@@ -132,9 +132,9 @@ def send_data(data, args):
headers={'Content-type': 'application/json', 'User-Agent': "send-error-report/"+version}
if args.json:
- url = "https://"+args.server+"/ClientPost/JSON/"
+ url = args.protocol+args.server+"/ClientPost/JSON/"
else:
- url = "https://"+args.server+"/ClientPost/"
+ url = args.protocol+args.server+"/ClientPost/"
req = urllib.request.Request(url, data=data, headers=headers)
try:
@@ -187,6 +187,11 @@ if __name__ == '__main__':
help="Return the result in json format, silences all other output",
action="store_true")
+ arg_parse.add_argument("--no-ssl",
+ help="Use http instead of https protocol",
+ dest="protocol",
+ action="store_const", const="http://", default="https://")
+
args = arg_parse.parse_args()