author     Brad Bishop <bradleyb@fuzziesquirrel.com>   2017-12-04 01:01:44 -0500
committer  Brad Bishop <bradleyb@fuzziesquirrel.com>   2017-12-14 17:17:23 +0000
commit     37a0e4ddff58c0120cc5cfef104b60d0e180638c (patch)
tree       1628857a2eb33ab517ba93d6a3ca25e55bd3e628
parent     3c4c45d1e9a2324191a8640b22df1b71f15f3037 (diff)
Squashed 'import-layers/yocto-poky/' changes from dc8508f6099..67491b0c104
Yocto 2.2.2 (Morty)
Change-Id: Id9a452e28940d9f166957de243d9cb1d8818704e
git-subtree-dir: import-layers/yocto-poky
git-subtree-split: 67491b0c104101bb9f366d697edd23c895be4302
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
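
The git-subtree-dir and git-subtree-split trailers above are the markers that git subtree writes when it squashes an imported tree into a subdirectory. As a sketch only (the remote name and ref are placeholders, not recorded on this page), an update of this kind is normally produced by a pull of the form:

    $ git subtree pull --prefix=import-layers/yocto-poky <poky-remote> <ref> --squash

git subtree then creates the "Squashed 'import-layers/yocto-poky/' changes from A..B" commit seen here and records the imported upstream revision in git-subtree-split.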
377 files changed, 23080 insertions, 2574 deletions
diff --git a/import-layers/yocto-poky/bitbake/bin/bitbake-worker b/import-layers/yocto-poky/bitbake/bin/bitbake-worker
index 500f2ad16..db3c4b184 100755
--- a/import-layers/yocto-poky/bitbake/bin/bitbake-worker
+++ b/import-layers/yocto-poky/bitbake/bin/bitbake-worker
@@ -11,7 +11,10 @@ import select
 import errno
 import signal
 import pickle
+import traceback
+import queue
 from multiprocessing import Lock
+from threading import Thread
 
 if sys.getfilesystemencoding() != "utf-8":
     sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")
@@ -63,7 +66,7 @@ if 0:
     consolelog.setFormatter(conlogformat)
     logger.addHandler(consolelog)
 
-worker_queue = b""
+worker_queue = queue.Queue()
 
 def worker_fire(event, d):
     data = b"<event>" + pickle.dumps(event) + b"</event>"
@@ -72,21 +75,38 @@ def worker_fire(event, d):
 def worker_fire_prepickled(event):
     global worker_queue
 
-    worker_queue = worker_queue + event
-    worker_flush()
+    worker_queue.put(event)
 
-def worker_flush():
-    global worker_queue, worker_pipe
+#
+# We can end up with write contention with the cooker, it can be trying to send commands
+# and we can be trying to send event data back. Therefore use a separate thread for writing
+# back data to cooker.
+#
+worker_thread_exit = False
 
-    if not worker_queue:
-        return
+def worker_flush(worker_queue):
+    worker_queue_int = b""
+    global worker_pipe, worker_thread_exit
 
-    try:
-        written = os.write(worker_pipe, worker_queue)
-        worker_queue = worker_queue[written:]
-    except (IOError, OSError) as e:
-        if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
-            raise
+    while True:
+        try:
+            worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
+        except queue.Empty:
+            pass
+        while (worker_queue_int or not worker_queue.empty()):
+            try:
+                if not worker_queue.empty():
+                    worker_queue_int = worker_queue_int + worker_queue.get()
+                written = os.write(worker_pipe, worker_queue_int)
+                worker_queue_int = worker_queue_int[written:]
+            except (IOError, OSError) as e:
+                if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
+                    raise
+        if worker_thread_exit and worker_queue.empty() and not worker_queue_int:
+            return
+
+worker_thread = Thread(target=worker_flush, args=(worker_queue,))
+worker_thread.start()
 
 def worker_child_fire(event, d):
     global worker_pipe
@@ -234,9 +254,9 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
                 if quieterrors:
                     the_data.setVarFlag(taskname, "quieterrors", "1")
 
-            except Exception as exc:
+            except Exception:
                 if not quieterrors:
-                    logger.critical(str(exc))
+                    logger.critical(traceback.format_exc())
                 os._exit(1)
             try:
                 if cfg.dry_run:
@@ -352,7 +372,6 @@ class BitbakeWorker(object):
                 self.build_pipes[pipe].read()
             if len(self.build_pids):
                 self.process_waitpid()
-            worker_flush()
 
 
     def handle_item(self, item, func):
@@ -457,8 +476,10 @@ except BaseException as e:
     import traceback
     sys.stderr.write(traceback.format_exc())
     sys.stderr.write(str(e))
 
-while len(worker_queue):
-    worker_flush()
+
+worker_thread_exit = True
+worker_thread.join()
+
 workerlog_write("exitting")
 sys.exit(0)
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/build.py b/import-layers/yocto-poky/bitbake/lib/bb/build.py
index c4c8aeb64..b59a49bc1 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/build.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/build.py
@@ -92,6 +92,7 @@ class TaskBase(event.Event):
     def __init__(self, t, logfile, d):
         self._task = t
         self._package = d.getVar("PF",
True) + self._mc = d.getVar("BB_CURRENT_MC", True) self.taskfile = d.getVar("FILE", True) self.taskname = self._task self.logfile = logfile @@ -723,7 +724,7 @@ def make_stamp(task, d, file_name = None): for mask in cleanmask: for name in glob.glob(mask): # Preserve sigdata files in the stamps directory - if "sigdata" in name: + if "sigdata" in name or "sigbasedata" in name: continue # Preserve taint files in the stamps directory if name.endswith('.taint'): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py b/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py index 25938d658..5d2d44065 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py @@ -342,8 +342,7 @@ class ShellParser(): except pyshlex.NeedMore: raise sherrors.ShellSyntaxError("Unexpected EOF") - for token in tokens: - self.process_tokens(token) + self.process_tokens(tokens) def process_tokens(self, tokens): """Process a supplied portion of the syntax tree as returned by @@ -389,18 +388,24 @@ class ShellParser(): "case_clause": case_clause, } - for token in tokens: - name, value = token - try: - more_tokens, words = token_handlers[name](value) - except KeyError: - raise NotImplementedError("Unsupported token type " + name) + def process_token_list(tokens): + for token in tokens: + if isinstance(token, list): + process_token_list(token) + continue + name, value = token + try: + more_tokens, words = token_handlers[name](value) + except KeyError: + raise NotImplementedError("Unsupported token type " + name) + + if more_tokens: + self.process_tokens(more_tokens) - if more_tokens: - self.process_tokens(more_tokens) + if words: + self.process_words(words) - if words: - self.process_words(words) + process_token_list(tokens) def process_words(self, words): """Process a set of 'words' in pyshyacc parlance, which includes diff --git a/import-layers/yocto-poky/bitbake/lib/bb/cooker.py b/import-layers/yocto-poky/bitbake/lib/bb/cooker.py index 42831e277..07897be27 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/cooker.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/cooker.py @@ -252,6 +252,10 @@ class BBCooker: signal.signal(signal.SIGHUP, self.sigterm_exception) def config_notifications(self, event): + if event.maskname == "IN_Q_OVERFLOW": + bb.warn("inotify event queue overflowed, invalidating caches.") + self.baseconfig_valid = False + return if not event.pathname in self.configwatcher.bbwatchedfiles: return if not event.pathname in self.inotify_modified_files: @@ -259,6 +263,10 @@ class BBCooker: self.baseconfig_valid = False def notifications(self, event): + if event.maskname == "IN_Q_OVERFLOW": + bb.warn("inotify event queue overflowed, invalidating caches.") + self.parsecache_valid = False + return if not event.pathname in self.inotify_modified_files: self.inotify_modified_files.append(event.pathname) self.parsecache_valid = False @@ -657,8 +665,40 @@ class BBCooker: # A task of None means use the default task if task is None: task = self.configuration.cmd + if not task.startswith("do_"): + task = "do_%s" % task + + targetlist = self.checkPackages(pkgs_to_build, task) + fulltargetlist = [] + defaulttask_implicit = '' + defaulttask_explicit = False + wildcard = False + + # Wild card expansion: + # Replace string such as "multiconfig:*:bash" + # into "multiconfig:A:bash multiconfig:B:bash bash" + for k in targetlist: + if k.startswith("multiconfig:"): + if wildcard: + bb.fatal('multiconfig conflict') + if k.split(":")[1] == "*": + wildcard = 
True + for mc in self.multiconfigs: + if mc: + fulltargetlist.append(k.replace('*', mc)) + # implicit default task + else: + defaulttask_implicit = k.split(":")[2] + else: + fulltargetlist.append(k) + else: + defaulttask_explicit = True + fulltargetlist.append(k) - fulltargetlist = self.checkPackages(pkgs_to_build, task) + if not defaulttask_explicit and defaulttask_implicit != '': + fulltargetlist.append(defaulttask_implicit) + + bb.debug(1,"Target list: %s" % (str(fulltargetlist))) taskdata = {} localdata = {} @@ -715,6 +755,9 @@ class BBCooker: Create a dependency graph of pkgs_to_build including reverse dependency information. """ + if not task.startswith("do_"): + task = "do_%s" % task + runlist, taskdata = self.prepareTreeData(pkgs_to_build, task) rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) rq.rqdata.prepare() @@ -818,6 +861,9 @@ class BBCooker: """ Create a dependency tree of pkgs_to_build, returning the data. """ + if not task.startswith("do_"): + task = "do_%s" % task + _, taskdata = self.prepareTreeData(pkgs_to_build, task) seen_fns = [] @@ -1318,6 +1364,8 @@ class BBCooker: # If we are told to do the None task then query the default task if (task == None): task = self.configuration.cmd + if not task.startswith("do_"): + task = "do_%s" % task fn, cls, mc = bb.cache.virtualfn2realfn(buildfile) fn = self.matchFile(fn) @@ -1354,8 +1402,6 @@ class BBCooker: # Invalidate task for target if force mode active if self.configuration.force: logger.verbose("Invalidate task %s, %s", task, fn) - if not task.startswith("do_"): - task = "do_%s" % task bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn) # Setup taskdata structure @@ -1367,8 +1413,6 @@ class BBCooker: bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.expanded_data) # Execute the runqueue - if not task.startswith("do_"): - task = "do_%s" % task runlist = [[mc, item, task, fn]] rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist) @@ -1579,7 +1623,8 @@ class BBCooker: if self.state != state.parsing and not self.parsecache_valid: self.parseConfiguration () if CookerFeatures.SEND_SANITYEVENTS in self.featureset: - bb.event.fire(bb.event.SanityCheck(False), self.data) + for mc in self.multiconfigs: + bb.event.fire(bb.event.SanityCheck(False), self.databuilder.mcdata[mc]) for mc in self.multiconfigs: ignore = self.databuilder.mcdata[mc].getVar("ASSUME_PROVIDED", True) or "" diff --git a/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py b/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py index b07c26643..98f56ac7b 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py @@ -288,7 +288,7 @@ class CookerDataBuilder(object): multiconfig = (self.data.getVar("BBMULTICONFIG", True) or "").split() for config in multiconfig: - mcdata = self.parseConfigurationFiles(['conf/multiconfig/%s.conf' % config] + self.prefiles, self.postfiles) + mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config) bb.event.fire(bb.event.ConfigParsed(), mcdata) self.mcdata[config] = mcdata @@ -304,8 +304,9 @@ class CookerDataBuilder(object): def _findLayerConf(self, data): return findConfigFile("bblayers.conf", data) - def parseConfigurationFiles(self, prefiles, postfiles): + def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"): data = bb.data.createCopy(self.basedata) + data.setVar("BB_CURRENT_MC", mc) # Parse files for loading *before* bitbake.conf and any includes 
for f in prefiles: diff --git a/import-layers/yocto-poky/bitbake/lib/bb/data.py b/import-layers/yocto-poky/bitbake/lib/bb/data.py index c1f27cd0c..c56965c60 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/data.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/data.py @@ -258,11 +258,13 @@ def exported_keys(d): not d.getVarFlag(key, 'unexport', False)) def exported_vars(d): - for key in exported_keys(d): + k = list(exported_keys(d)) + for key in k: try: value = d.getVar(key, True) - except Exception: - pass + except Exception as err: + bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE", True), key, err)) + continue if value is not None: yield key, str(value) diff --git a/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py b/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py index f100446dc..805a9a71f 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py @@ -748,13 +748,14 @@ class DataSmart(MutableMapping): if match: removes.extend(self.expand(r).split()) - filtered = filter(lambda v: v not in removes, - value.split()) - value = " ".join(filtered) - if expand and var in self.expand_cache: - # We need to ensure the expand cache has the correct value - # flag == "_content" here - self.expand_cache[var].value = value + if removes: + filtered = filter(lambda v: v not in removes, + value.split()) + value = " ".join(filtered) + if expand and var in self.expand_cache: + # We need to ensure the expand cache has the correct value + # flag == "_content" here + self.expand_cache[var].value = value return value def delVarFlag(self, var, flag, **loginfo): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py index ecb946aa8..23d48acb0 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py @@ -108,9 +108,8 @@ class Wget(FetchMethod): bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile)) fetchcmd += " -O " + dldir + os.sep + ud.localfile - if ud.user: - up = ud.user.split(":") - fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (up[0],up[1]) + if ud.user and ud.pswd: + fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd) uri = ud.url.split(";")[0] if os.path.exists(ud.localpath): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py b/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py index 84b268580..9384c72ba 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py @@ -289,8 +289,8 @@ class RunQueueData: return tid + task_name_suffix def get_short_user_idstring(self, task, task_name_suffix = ""): - (mc, fn, taskname, _) = split_tid_mcfn(task) - pn = self.dataCaches[mc].pkg_fn[fn] + (mc, fn, taskname, taskfn) = split_tid_mcfn(task) + pn = self.dataCaches[mc].pkg_fn[taskfn] taskname = taskname_from_tid(task) + task_name_suffix return "%s:%s" % (pn, taskname) @@ -884,14 +884,14 @@ class RunQueueData: if not self.cooker.configuration.nosetscene: for tid in self.runtaskentries: (mc, fn, taskname, _) = split_tid_mcfn(tid) - setscenetid = fn + ":" + taskname + "_setscene" + setscenetid = tid + "_setscene" if setscenetid not in taskData[mc].taskentries: continue self.runq_setscene_tids.append(tid) def invalidate_task(tid, error_nostamp): - (mc, fn, taskname, _) = split_tid_mcfn(tid) - taskdep = self.dataCaches[mc].task_deps[fn] + (mc, fn, taskname, 
taskfn) = split_tid_mcfn(tid) + taskdep = self.dataCaches[mc].task_deps[taskfn] if fn + ":" + taskname not in taskData[mc].taskentries: logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname) if 'nostamp' in taskdep and taskname in taskdep['nostamp']: @@ -997,8 +997,9 @@ class RunQueue: magic = "decafbadbad" if fakeroot: magic = magic + "beef" - fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True) - fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split() + mcdata = self.cooker.databuilder.mcdata[mc] + fakerootcmd = mcdata.getVar("FAKEROOTCMD", True) + fakerootenv = (mcdata.getVar("FAKEROOTBASEENV", True) or "").split() env = os.environ.copy() for key, value in (var.split('=') for var in fakerootenv): env[key] = value @@ -1059,10 +1060,9 @@ class RunQueue: for mc in self.rqdata.dataCaches: self.worker[mc] = self._start_worker(mc) - def start_fakeworker(self, rqexec): - if not self.fakeworker: - for mc in self.rqdata.dataCaches: - self.fakeworker[mc] = self._start_worker(mc, True, rqexec) + def start_fakeworker(self, rqexec, mc): + if not mc in self.fakeworker: + self.fakeworker[mc] = self._start_worker(mc, True, rqexec) def teardown_workers(self): self.teardown = True @@ -1322,7 +1322,7 @@ class RunQueue: continue sq_fn.append(fn) - sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn]) + sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn]) sq_hash.append(self.rqdata.runtaskentries[tid].hash) sq_taskname.append(taskname) sq_task.append(tid) @@ -1402,8 +1402,8 @@ class RunQueue: for tid in invalidtasks: - (mc, fn, taskname, _) = split_tid_mcfn(tid) - pn = self.rqdata.dataCaches[mc].pkg_fn[fn] + (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) + pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] h = self.rqdata.runtaskentries[tid].hash matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData) match = None @@ -1506,8 +1506,8 @@ class RunQueueExecute: taskdata = {} taskdeps.add(task) for dep in taskdeps: - (mc, fn, taskname, _) = split_tid_mcfn(dep) - pn = self.rqdata.dataCaches[mc].pkg_fn[fn] + (mc, fn, taskname, taskfn) = split_tid_mcfn(dep) + pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] taskdata[dep] = [pn, taskname, fn] call = self.rq.depvalidate + "(task, taskdata, notneeded, d)" locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data } @@ -1707,7 +1707,7 @@ class RunQueueExecuteTasks(RunQueueExecute): # Check tasks that are going to run against the whitelist def check_norun_task(tid, showerror=False): - (mc, fn, taskname, _) = split_tid_mcfn(tid) + (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) # Ignore covered tasks if tid in self.rq.scenequeue_covered: return False @@ -1715,11 +1715,11 @@ class RunQueueExecuteTasks(RunQueueExecute): if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache): return False # Ignore noexec tasks - taskdep = self.rqdata.dataCaches[mc].task_deps[fn] + taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] if 'noexec' in taskdep and taskname in taskdep['noexec']: return False - pn = self.rqdata.dataCaches[mc].pkg_fn[fn] + pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): if showerror: if tid in self.rqdata.runq_setscene_tids: @@ -1787,9 +1787,9 @@ class RunQueueExecuteTasks(RunQueueExecute): taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not 
self.cooker.configuration.dry_run: - if not self.rq.fakeworker: + if not mc in self.rq.fakeworker: try: - self.rq.start_fakeworker(self) + self.rq.start_fakeworker(self, mc) except OSError as exc: logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc))) self.rq.state = runQueueFailed @@ -1868,6 +1868,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): sq_revdeps_new = {} sq_revdeps_squash = {} self.sq_harddeps = {} + self.stamps = {} # We need to construct a dependency graph for the setscene functions. Intermediate # dependencies between the setscene tasks only complicate the code. This code @@ -1978,9 +1979,10 @@ class RunQueueExecuteScenequeue(RunQueueExecute): # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene" # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies for tid in self.rqdata.runq_setscene_tids: - (mc, fn, taskname, _) = split_tid_mcfn(tid) - realtid = fn + ":" + taskname + "_setscene" + (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) + realtid = tid + "_setscene" idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends + self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True) for (depname, idependtask) in idepends: if depname not in self.rqdata.taskData[mc].build_targets: @@ -2044,7 +2046,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): for tid in self.sq_revdeps: (mc, fn, taskname, taskfn) = split_tid_mcfn(tid) - taskdep = self.rqdata.dataCaches[mc].task_deps[fn] + taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] if 'noexec' in taskdep and taskname in taskdep['noexec']: noexec.append(tid) @@ -2065,7 +2067,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): continue sq_fn.append(fn) - sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn]) + sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn]) sq_hash.append(self.rqdata.runtaskentries[tid].hash) sq_taskname.append(taskname) sq_task.append(tid) @@ -2113,8 +2115,8 @@ class RunQueueExecuteScenequeue(RunQueueExecute): def check_taskfail(self, task): if self.rqdata.setscenewhitelist: realtask = task.split('_setscene')[0] - (mc, fn, taskname, _) = split_tid_mcfn(realtask) - pn = self.rqdata.dataCaches[mc].pkg_fn[fn] + (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask) + pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn] if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist): logger.error('Task %s.%s failed' % (pn, taskname + "_setscene")) self.rq.state = runQueueCleanUp @@ -2157,7 +2159,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute): if self.stats.active < self.number_tasks: # Find the next setscene to run for nexttask in self.rqdata.runq_setscene_tids: - if nexttask in self.runq_buildable and nexttask not in self.runq_running: + if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values(): if nexttask in self.unskippable: logger.debug(2, "Setscene task %s is unskippable" % nexttask) if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True): @@ -2199,14 +2201,16 @@ class RunQueueExecuteScenequeue(RunQueueExecute): taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn] if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run: 
- if not self.rq.fakeworker: - self.rq.start_fakeworker(self) + if not mc in self.rq.fakeworker: + self.rq.start_fakeworker(self, mc) self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>") self.rq.fakeworker[mc].process.stdin.flush() else: self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>") self.rq.worker[mc].process.stdin.flush() + self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True) + self.build_stamps2.append(self.build_stamps[task]) self.runq_running.add(task) self.stats.taskActive() if self.stats.active < self.number_tasks: diff --git a/import-layers/yocto-poky/bitbake/lib/bb/siggen.py b/import-layers/yocto-poky/bitbake/lib/bb/siggen.py index 3a7dac4cb..542bbb9d1 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/siggen.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/siggen.py @@ -30,6 +30,7 @@ class SignatureGenerator(object): name = "noop" def __init__(self, data): + self.basehash = {} self.taskhash = {} self.runtaskdeps = {} self.file_checksum_values = {} @@ -61,11 +62,10 @@ class SignatureGenerator(object): return def get_taskdata(self): - return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints) + return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash) def set_taskdata(self, data): - self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints = data - + self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data class SignatureGeneratorBasic(SignatureGenerator): """ @@ -133,7 +133,11 @@ class SignatureGeneratorBasic(SignatureGenerator): var = lookupcache[dep] if var is not None: data = data + str(var) - self.basehash[fn + "." + task] = hashlib.md5(data.encode("utf-8")).hexdigest() + datahash = hashlib.md5(data.encode("utf-8")).hexdigest() + k = fn + "." + task + if k in self.basehash and self.basehash[k] != datahash: + bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash)) + self.basehash[k] = datahash taskdeps[task] = alldeps self.taskdeps[fn] = taskdeps @@ -182,6 +186,7 @@ class SignatureGeneratorBasic(SignatureGenerator): def get_taskhash(self, fn, task, deps, dataCache): k = fn + "." 
+ task data = dataCache.basetaskhash[k] + self.basehash[k] = data self.runtaskdeps[k] = [] self.file_checksum_values[k] = [] recipename = dataCache.pkg_fn[fn] @@ -278,6 +283,15 @@ class SignatureGeneratorBasic(SignatureGenerator): if 'nostamp:' in self.taints[k]: data['taint'] = self.taints[k] + computed_basehash = calc_basehash(data) + if computed_basehash != self.basehash[k]: + bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k)) + if runtime and k in self.taskhash: + computed_taskhash = calc_taskhash(data) + if computed_taskhash != self.taskhash[k]: + bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k)) + sigfile = sigfile.replace(self.taskhash[k], computed_taskhash) + fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.") try: with os.fdopen(fd, "wb") as stream: @@ -292,15 +306,6 @@ class SignatureGeneratorBasic(SignatureGenerator): pass raise err - computed_basehash = calc_basehash(data) - if computed_basehash != self.basehash[k]: - bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k)) - if runtime and k in self.taskhash: - computed_taskhash = calc_taskhash(data) - if computed_taskhash != self.taskhash[k]: - bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k)) - - def dump_sigs(self, dataCaches, options): for fn in self.taskdeps: for task in self.taskdeps[fn]: @@ -346,9 +351,14 @@ def dump_this_task(outfile, d): bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp) def clean_basepath(a): + mc = None + if a.startswith("multiconfig:"): + _, mc, a = a.split(":", 2) b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2] if a.startswith("virtual:"): b = b + ":" + a.rsplit(":", 1)[0] + if mc: + b = b + ":multiconfig:" + mc return b def clean_basepaths(a): @@ -554,7 +564,8 @@ def calc_taskhash(sigdata): data = data + sigdata['runtaskhashes'][dep] for c in sigdata['file_checksum_values']: - data = data + c[1] + if c[1]: + data = data + c[1] if 'taint' in sigdata: if 'nostamp:' in sigdata['taint']: diff --git a/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py b/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py index 8899e861c..9fa5b5b3d 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py @@ -51,10 +51,13 @@ class Tinfoil: features = [] if tracking: features.append(CookerFeatures.BASEDATASTORE_TRACKING) + cleanedvars = bb.utils.clean_environment() self.cooker = BBCooker(self.config, features) self.config_data = self.cooker.data bb.providers.logger.setLevel(logging.ERROR) self.cooker_data = None + for k in cleanedvars: + os.environ[k] = cleanedvars[k] def register_idle_function(self, function, data): pass diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py index 5b69660a3..3ddcb2ac6 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py @@ -982,6 +982,31 @@ class BuildInfoHelper(object): pass return task_information + def _get_layer_version_for_dependency(self, pathRE): + """ Returns the layer in the toaster db that has a full regex match to the pathRE. + pathRE - the layer path passed as a regex in the event. It is created in + cooker.py as a collection for the layer priorities. 
+ """ + self._ensure_build() + + def _sort_longest_path(layer_version): + assert isinstance(layer_version, Layer_Version) + return len(layer_version.local_path) + + # we don't care if we match the trailing slashes + p = re.compile(re.sub("/[^/]*?$","",pathRE)) + # Heuristics: we always match recipe to the deepest layer path in the discovered layers + for lvo in sorted(self.orm_wrapper.layer_version_objects, reverse=True, key=_sort_longest_path): + if p.fullmatch(lvo.local_path): + return lvo + if lvo.layer.local_source_dir: + if p.fullmatch(lvo.layer.local_source_dir): + return lvo + #if we get here, we didn't read layers correctly; dump whatever information we have on the error log + logger.warning("Could not match layer dependency for path %s : %s", path, self.orm_wrapper.layer_version_objects) + + + def _get_layer_version_for_path(self, path): self._ensure_build() @@ -1372,7 +1397,7 @@ class BuildInfoHelper(object): if 'layer-priorities' in event._depgraph.keys(): for lv in event._depgraph['layer-priorities']: (_, path, _, priority) = lv - layer_version_obj = self._get_layer_version_for_path(path[1:]) # paths start with a ^ + layer_version_obj = self._get_layer_version_for_dependency(path) assert layer_version_obj is not None layer_version_obj.priority = priority layer_version_obj.save() diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py index fda7cc2c7..113fcedea 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py @@ -32,7 +32,10 @@ class BBUIHelper: def eventHandler(self, event): if isinstance(event, bb.build.TaskStarted): - self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time() } + if event._mc != "default": + self.running_tasks[event.pid] = { 'title' : "mc:%s:%s %s" % (event._mc, event._package, event._task), 'starttime' : time.time() } + else: + self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time() } self.running_pids.append(event.pid) self.needUpdate = True elif isinstance(event, bb.build.TaskSucceeded): diff --git a/import-layers/yocto-poky/bitbake/lib/bb/utils.py b/import-layers/yocto-poky/bitbake/lib/bb/utils.py index 729848a1c..16fc9db25 100644 --- a/import-layers/yocto-poky/bitbake/lib/bb/utils.py +++ b/import-layers/yocto-poky/bitbake/lib/bb/utils.py @@ -378,7 +378,7 @@ def _print_exception(t, value, tb, realfile, text, context): # If the exception is from spwaning a task, let's be helpful and display # the output (which hopefully includes stderr). 
- if isinstance(value, subprocess.CalledProcessError): + if isinstance(value, subprocess.CalledProcessError) and value.output: error.append("Subprocess output:") error.append(value.output.decode("utf-8", errors="ignore")) finally: diff --git a/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py b/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py index 3dfa2b223..aec9dbb92 100644 --- a/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py +++ b/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py @@ -60,9 +60,19 @@ DATABASES = { if 'sqlite' in DATABASES['default']['ENGINE']: DATABASES['default']['OPTIONS'] = { 'timeout': 20 } -# Hosts/domain names that are valid for this site; required if DEBUG is False -# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts -ALLOWED_HOSTS = [] +# Update as of django 1.8.16 release, the '*' is needed to allow us to connect while running +# on hosts without explicitly setting the fqdn for the toaster server. +# See https://docs.djangoproject.com/en/dev/ref/settings/ for info on ALLOWED_HOSTS +# Previously this setting was not enforced if DEBUG was set but it is now. +# The previous behavior was such that ALLOWED_HOSTS defaulted to ['localhost','127.0.0.1','::1'] +# and if you bound to 0.0.0.0:<port #> then accessing toaster as localhost or fqdn would both work. +# To have that same behavior, with a fqdn explicitly enabled you would set +# ALLOWED_HOSTS= ['localhost','127.0.0.1','::1','myserver.mycompany.com'] for +# Django >= 1.8.16. By default, we are not enforcing this restriction in +# DEBUG mode. +if DEBUG is True: + # this will allow connection via localhost,hostname, or fqdn + ALLOWED_HOSTS = ['*'] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name diff --git a/import-layers/yocto-poky/documentation/bsp-guide/bsp-guide.xml b/import-layers/yocto-poky/documentation/bsp-guide/bsp-guide.xml index 1bbdb70fe..bf6a6f871 100644 --- a/import-layers/yocto-poky/documentation/bsp-guide/bsp-guide.xml +++ b/import-layers/yocto-poky/documentation/bsp-guide/bsp-guide.xml @@ -113,6 +113,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> diff --git a/import-layers/yocto-poky/documentation/dev-manual/dev-manual-common-tasks.xml b/import-layers/yocto-poky/documentation/dev-manual/dev-manual-common-tasks.xml index 086d0bad9..b2a2e32c5 100644 --- a/import-layers/yocto-poky/documentation/dev-manual/dev-manual-common-tasks.xml +++ b/import-layers/yocto-poky/documentation/dev-manual/dev-manual-common-tasks.xml @@ -1379,11 +1379,11 @@ </literallayout> Use this syntax to generate a recipe based on <replaceable>source</replaceable>. The options direct <filename>recipetool</filename> to - run in "quiet mode" and to generate debugging information. + generate debugging information. 
Once generated, the recipe resides in the existing source code layer: <literallayout class='monospaced'> - recipetool create -o <replaceable>OUTFILE</replaceable> <replaceable>source</replaceable> + recipetool create -d -o <replaceable>OUTFILE</replaceable> <replaceable>source</replaceable> </literallayout> </para> </section> @@ -2891,9 +2891,9 @@ machine, and a sysroot exists for the build host. <note> You could find the term "staging" used within the Yocto - project regarding files populating sysroot. - The term "staging" was used for previous releases of - the Yocto Project. + project regarding files populating sysroot (e.g. the + <ulink url='&YOCTO_DOCS_REF_URL;#var-STAGING_DIR'><filename>STAGING_DIR</filename></ulink> + variable). </note> </para> @@ -2906,7 +2906,12 @@ task within the <filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-D'><filename>D</filename></ulink><filename>}</filename> directory. - A subset of these files automatically populates the sysroot. + </para> + + <para> + A subset of these files, as defined by the + the <ulink url='&YOCTO_DOCS_REF_URL;#var-SYSROOT_DIRS'><filename>SYSROOT_DIRS</filename></ulink> + variable, automatically populates the sysroot. The reason for this limitation is that almost all files that populate the sysroot are cataloged in manifests in order to ensure the files can be removed later when a recipe is either @@ -2915,6 +2920,17 @@ </para> <para> + It is possible to modify the list of directories that populate + the sysroot. + The following example shows how you could add the + <filename>/opt</filename> directory to the list of + directories: + <literallayout class='monospaced'> + SYSROOT_DIRS += "/opt" + </literallayout> + </para> + + <para> For information on variables you can use to help control how files sysroot is populated, see the <ulink url='&YOCTO_DOCS_REF_URL;#var-SYSROOT_DIRS'><filename>SYSROOT_DIRS</filename></ulink>, @@ -2996,6 +3012,13 @@ If the script succeeds, the package is marked as installed. If the script fails, the package is marked as unpacked and the script is executed when the image boots again. + <note> + Any RPM post-installation script that runs on the target + should return a 0 exit code. + RPM does not allow non-zero exit codes for these scripts, + and the RPM package manager will cause the package to fail + installation on the target. + </note> </para> <para> @@ -3961,7 +3984,7 @@ </para></listitem> </itemizedlist> </para> -' + <para> For the RPM Package Management System, the following implementation details exist: @@ -4342,328 +4365,385 @@ format the device requires. Should your device require multiple partitions on an SD card, flash, or an HDD, you can use the OpenEmbedded Image Creator, - <filename>wic</filename>, to create the properly partitioned image. - </para> - - <para> - The <filename>wic</filename> command generates partitioned images - from existing OpenEmbedded build artifacts. - Image generation is driven by partitioning commands contained - in an Openembedded kickstart file (<filename>.wks</filename>) - specified either directly on the command line or as one of a - selection of canned <filename>.wks</filename> files as shown - with the <filename>wic list images</filename> command in the - "<link linkend='using-a-provided-kickstart_file'>Using an Existing Kickstart File</link>" - section. - When applied to a given set of build artifacts, the result is an - image or set of images that can be directly written onto media and - used on a particular system. 
+ Wic, to create the properly partitioned image. </para> <para> - The <filename>wic</filename> command and the infrastructure - it is based on is by definition incomplete. - Its purpose is to allow the generation of customized images, - and as such was designed to be completely extensible through a - plug-in interface. - See the - "<link linkend='openembedded-kickstart-plugins'>Plug-ins</link>" - section for information on these plug-ins. - </para> - - <para> - This section provides some background information on - <filename>wic</filename>, describes what you need to have in - place to run the tool, provides instruction on how to use - <filename>wic</filename>, and provides several examples. + You can generate partitioned images + (<replaceable>image</replaceable><filename>.wic</filename>) + two ways: using the OpenEmbedded build system and by running + the OpenEmbedded Image Creator Wic directly. + The former way is preferable as it is easier to use and understand. </para> - <section id='wic-background'> - <title>Background</title> + <section id='creating-wic-images-oe'> + <title>Creating Partitioned Images</title> <para> - This section provides some background on the - <filename>wic</filename> utility. - While none of this information is required to use - <filename>wic</filename>, you might find it interesting. + The OpenEmbedded build system can generate + partitioned images the same way as it generates + any other image type. + To generate a partitioned image, you need to modify + two variables. <itemizedlist> <listitem><para> - The name "wic" is derived from OpenEmbedded - Image Creator (oeic). - The "oe" diphthong in "oeic" was promoted to the - letter "w", because "oeic" is both difficult to remember and - pronounce.</para></listitem> - <listitem><para> - <filename>wic</filename> is loosely based on the - Meego Image Creator (<filename>mic</filename>) - framework. - The <filename>wic</filename> implementation has been - heavily modified to make direct use of OpenEmbedded - build artifacts instead of package installation and - configuration, which are already incorporated within - the OpenEmbedded artifacts.</para></listitem> - <listitem><para> - <filename>wic</filename> is a completely independent - standalone utility that initially provides - easier-to-use and more flexible replacements for a - couple bits of existing functionality in OE Core's - <ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-image-live'><filename>image-live</filename></ulink> - class and <filename>mkefidisk.sh</filename> script. - The difference between - <filename>wic</filename> and those examples is - that with <filename>wic</filename> the - functionality of those scripts is implemented - by a general-purpose partitioning language, which is - based on Redhat kickstart syntax.</para></listitem> - </itemizedlist> - </para> - </section> - - <section id='wic-requirements'> - <title>Requirements</title> - - <para> - In order to use the <filename>wic</filename> utility - with the OpenEmbedded Build system, your system needs - to meet the following requirements: - <itemizedlist> - <listitem><para>The Linux distribution on your - development host must support the Yocto Project. 
- See the - "<ulink url='&YOCTO_DOCS_REF_URL;#detailed-supported-distros'>Supported Linux Distributions</ulink>" - section in the Yocto Project Reference Manual for this - list of distributions.</para></listitem> - <listitem><para> - The standard system utilities, such as - <filename>cp</filename>, must be installed on your - development host system. - </para></listitem> - <listitem><para> - You need to have the build artifacts already - available, which typically means that you must - have already created an image using the - Openembedded build system (e.g. - <filename>core-image-minimal</filename>). - While it might seem redundant to generate an image in - order to create an image using - <filename>wic</filename>, the current version of - <filename>wic</filename> requires the artifacts - in the form generated by the build system. - </para></listitem> - <listitem><para> - You must build several native tools, which are tools - built to run on the build system: - <literallayout class='monospaced'> - $ bitbake parted-native dosfstools-native mtools-native - </literallayout> + Include "wic" as part of the + <ulink url='&YOCTO_DOCS_REF_URL;#var-IMAGE_FSTYPES'><filename>IMAGE_FSTYPES</filename></ulink> + variable. </para></listitem> <listitem><para> - You must have sourced one of the build environment - setup scripts (i.e. - <ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink> - or - <ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink>) - found in the - <link linkend='build-directory'>Build Directory</link>. + Include the name of the + <link linkend='openembedded-kickstart-wks-reference'>wic kickstart file</link> + as part of the + <ulink url='&YOCTO_DOCS_REF_URL;#var-WKS_FILE'><filename>WKS_FILE</filename></ulink> + variable </para></listitem> </itemizedlist> + Further steps to generate a partitioned image + are the same as for any other image type. + For information on image types, see the + "<link linkend='building-images'>Building Images</link>" + section. </para> </section> - <section id='wic-getting-help'> - <title>Getting Help</title> + <section id='create-wic-images-wic'> + <title>Using OpenEmbedded Image Creator Wic to Generate Partitioned Images</title> <para> - You can get general help for the <filename>wic</filename> - by entering the <filename>wic</filename> command by itself - or by entering the command with a help argument as follows: - <literallayout class='monospaced'> - $ wic -h - $ wic --help - </literallayout> + The <filename>wic</filename> command generates partitioned + images from existing OpenEmbedded build artifacts. + Image generation is driven by partitioning commands + contained in an Openembedded kickstart file + (<filename>.wks</filename>) specified either directly on + the command line or as one of a selection of canned + <filename>.wks</filename> files as shown with the + <filename>wic list images</filename> command in the + "<link linkend='using-a-provided-kickstart-file'>Using an Existing Kickstart File</link>" + section. + When you apply the command to a given set of build + artifacts, the result is an image or set of images that + can be directly written onto media and used on a particular + system. </para> <para> - Currently, <filename>wic</filename> supports two commands: - <filename>create</filename> and <filename>list</filename>. 
- You can get help for these commands as follows: - <literallayout class='monospaced'> - $ wic help <replaceable>command</replaceable> - </literallayout> + The <filename>wic</filename> command and the infrastructure + it is based on is by definition incomplete. + The purpose of the command is to allow the generation of + customized images, and as such, was designed to be + completely extensible through a plug-in interface. + See the + "<link linkend='openembedded-kickstart-plugins'>Plug-ins</link>" + section for information on these plug-ins. </para> <para> - You can also get detailed help on a number of topics - from the help system. - The output of <filename>wic --help</filename> - displays a list of available help - topics under a "Help topics" heading. - You can have the help system display the help text for - a given topic by prefacing the topic with - <filename>wic help</filename>: - <literallayout class='monospaced'> - $ wic help <replaceable>help_topic</replaceable> - </literallayout> + This section provides some background information on Wic, + describes what you need to have in + place to run the tool, provides instruction on how to use + the <filename>wic</filename> utility, + and provides several examples. </para> - <para> - You can find out more about the images - <filename>wic</filename> creates using the existing - kickstart files with the following form of the command: - <literallayout class='monospaced'> - $ wic list <replaceable>image</replaceable> help - </literallayout> - where <filename><replaceable>image</replaceable></filename> is either - <filename>directdisk</filename> or - <filename>mkefidisk</filename>. - </para> - </section> + <section id='wic-background'> + <title>Background</title> - <section id='operational-modes'> - <title>Operational Modes</title> + <para> + This section provides some background on the + <filename>wic</filename> utility. + While none of this information is required to use + Wic, you might find it interesting. + <itemizedlist> + <listitem><para> + The name "Wic" is derived from OpenEmbedded + Image Creator (oeic). + The "oe" diphthong in "oeic" was promoted to the + letter "w", because "oeic" is both difficult to + remember and to pronounce. + </para></listitem> + <listitem><para> + Wic is loosely based on the + Meego Image Creator (<filename>mic</filename>) + framework. + The Wic implementation has been + heavily modified to make direct use of OpenEmbedded + build artifacts instead of package installation and + configuration, which are already incorporated within + the OpenEmbedded artifacts. + </para></listitem> + <listitem><para> + Wic is a completely independent + standalone utility that initially provides + easier-to-use and more flexible replacements for a + existing functionality in OE Core's + <ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-image-live'><filename>image-live</filename></ulink> + class and <filename>mkefidisk.sh</filename> script. 
+ The difference between + Wic and those examples is + that with Wic the + functionality of those scripts is implemented + by a general-purpose partitioning language, which is + based on Redhat kickstart syntax.</para></listitem> + </itemizedlist> + </para> + </section> - <para> - You can use <filename>wic</filename> in two different - modes, depending on how much control you need for - specifying the Openembedded build artifacts that are - used for creating the image: Raw and Cooked: - <itemizedlist> - <listitem><para><emphasis>Raw Mode:</emphasis> - You explicitly specify build artifacts through - command-line arguments.</para></listitem> - <listitem><para><emphasis>Cooked Mode:</emphasis> - The current - <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> - setting and image name are used to automatically locate - and provide the build artifacts.</para></listitem> - </itemizedlist> - </para> + <section id='wic-requirements'> + <title>Requirements</title> - <para> - Regardless of the mode you use, you need to have the build - artifacts ready and available. - Additionally, the environment must be set up using the - <ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink> - or - <ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink> - script found in the - <link linkend='build-directory'>Build Directory</link>. - </para> + <para> + In order to use the <filename>wic</filename> utility + with the OpenEmbedded Build system, your system needs + to meet the following requirements: + <itemizedlist> + <listitem><para>The Linux distribution on your + development host must support the Yocto Project. + See the + "<ulink url='&YOCTO_DOCS_REF_URL;#detailed-supported-distros'>Supported Linux Distributions</ulink>" + section in the Yocto Project Reference Manual for + the list of distributions that support the + Yocto Project. + </para></listitem> + <listitem><para> + The standard system utilities, such as + <filename>cp</filename>, must be installed on your + development host system. + </para></listitem> + <listitem><para> + You need to have the build artifacts already + available, which typically means that you must + have already created an image using the + Openembedded build system (e.g. + <filename>core-image-minimal</filename>). + While it might seem redundant to generate an image + in order to create an image using + Wic, the current version of + Wic requires the artifacts + in the form generated by the build system. + </para></listitem> + <listitem><para> + You must build several native tools, which are tools + built to run on the build system: + <literallayout class='monospaced'> + $ bitbake parted-native dosfstools-native mtools-native + </literallayout> + </para></listitem> + <listitem><para> + You must have sourced one of the build environment + setup scripts (i.e. + <ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink> + or + <ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink>) + found in the + <link linkend='build-directory'>Build Directory</link>. 
+ </para></listitem> + </itemizedlist> + </para> + </section> - <section id='raw-mode'> - <title>Raw Mode</title> + <section id='wic-getting-help'> + <title>Getting Help</title> + + <para> + You can get general help for the <filename>wic</filename> + command by entering the <filename>wic</filename> command + by itself or by entering the command with a help argument + as follows: + <literallayout class='monospaced'> + $ wic -h + $ wic --help + </literallayout> + </para> <para> - The general form of the 'wic' command in raw mode is: + Currently, Wic supports two commands: + <filename>create</filename> and <filename>list</filename>. + You can get help for these commands as follows: <literallayout class='monospaced'> + $ wic help <replaceable>command</replaceable> + with <replaceable>command</replaceable> being either + <filename>create</filename> or <filename>list</filename>. + </literallayout> + </para> + + <para> + You can also get detailed help on a number of topics + from the help system. + The output of <filename>wic --help</filename> + displays a list of available help + topics under a "Help topics" heading. + You can have the help system display the help text for + a given topic by prefacing the topic with + <filename>wic help</filename>: + <literallayout class='monospaced'> + $ wic help <replaceable>help_topic</replaceable> + </literallayout> + </para> + + <para> + You can find out more about the images + Wic creates using the existing + kickstart files with the following form of the command: + <literallayout class='monospaced'> + $ wic list <replaceable>image</replaceable> help + </literallayout> + with <filename><replaceable>image</replaceable></filename> + being either <filename>directdisk</filename> or + <filename>mkefidisk</filename>. + </para> + </section> + + <section id='operational-modes'> + <title>Operational Modes</title> + + <para> + You can use Wic in two different + modes, depending on how much control you need for + specifying the Openembedded build artifacts that are + used for creating the image: Raw and Cooked: + <itemizedlist> + <listitem><para> + <emphasis>Raw Mode:</emphasis> + You explicitly specify build artifacts through + command-line arguments. + </para></listitem> + <listitem><para> + <emphasis>Cooked Mode:</emphasis> + The current + <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> + setting and image name are used to automatically + locate and provide the build artifacts. + </para></listitem> + </itemizedlist> + </para> + + <para> + Regardless of the mode you use, you need to have the build + artifacts ready and available. + Additionally, the environment must be set up using the + <ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink> + or + <ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink> + script found in the + <link linkend='build-directory'>Build Directory</link>. + </para> + + <section id='raw-mode'> + <title>Raw Mode</title> + + <para> + The general form of the + <filename>wic</filename> command in raw mode is: + <literallayout class='monospaced'> $ wic create <replaceable>image_name</replaceable>.wks [<replaceable>options</replaceable>] [...] - Where: + Where: - <replaceable>image_name</replaceable>.wks - An OpenEmbedded kickstart file. You can provide - your own custom file or use a file from a set of - existing files as described by further options. 
+ <replaceable>image_name</replaceable>.wks + An OpenEmbedded kickstart file. You can provide + your own custom file or use a file from a set of + existing files as described by further options. - -o <replaceable>OUTDIR</replaceable>, --outdir=<replaceable>OUTDIR</replaceable> - The name of a directory in which to create image. + -o <replaceable>OUTDIR</replaceable>, --outdir=<replaceable>OUTDIR</replaceable> + The name of a directory in which to create image. - -i <replaceable>PROPERTIES_FILE</replaceable>, --infile=<replaceable>PROPERTIES_FILE</replaceable> - The name of a file containing the values for image - properties as a JSON file. + -i <replaceable>PROPERTIES_FILE</replaceable>, --infile=<replaceable>PROPERTIES_FILE</replaceable> + The name of a file containing the values for image + properties as a JSON file. - -e <replaceable>IMAGE_NAME</replaceable>, --image-name=<replaceable>IMAGE_NAME</replaceable> - The name of the image from which to use the artifacts - (e.g. <filename>core-image-sato</filename>). + -e <replaceable>IMAGE_NAME</replaceable>, --image-name=<replaceable>IMAGE_NAME</replaceable> + The name of the image from which to use the artifacts + (e.g. <filename>core-image-sato</filename>). - -r <replaceable>ROOTFS_DIR</replaceable>, --rootfs-dir=<replaceable>ROOTFS_DIR</replaceable> - The path to the <filename>/rootfs</filename> directory to use as the - <filename>.wks</filename> rootfs source. + -r <replaceable>ROOTFS_DIR</replaceable>, --rootfs-dir=<replaceable>ROOTFS_DIR</replaceable> + The path to the <filename>/rootfs</filename> directory to use as the + <filename>.wks</filename> rootfs source. - -b <replaceable>BOOTIMG_DIR</replaceable>, --bootimg-dir=<replaceable>BOOTIMG_DIR</replaceable> - The path to the directory containing the boot artifacts - (e.g. <filename>/EFI</filename> or <filename>/syslinux</filename>) to use as the <filename>.wks</filename> bootimg - source. + -b <replaceable>BOOTIMG_DIR</replaceable>, --bootimg-dir=<replaceable>BOOTIMG_DIR</replaceable> + The path to the directory containing the boot artifacts + (e.g. <filename>/EFI</filename> or <filename>/syslinux</filename>) to use as the <filename>.wks</filename> bootimg + source. - -k <replaceable>KERNEL_DIR</replaceable>, --kernel-dir=<replaceable>KERNEL_DIR</replaceable> - The path to the directory containing the kernel to use - in the <filename>.wks</filename> boot image. + -k <replaceable>KERNEL_DIR</replaceable>, --kernel-dir=<replaceable>KERNEL_DIR</replaceable> + The path to the directory containing the kernel to use + in the <filename>.wks</filename> boot image. - -n <replaceable>NATIVE_SYSROOT</replaceable>, --native-sysroot=<replaceable>NATIVE_SYSROOT</replaceable> - The path to the native sysroot containing the tools to use - to build the image. + -n <replaceable>NATIVE_SYSROOT</replaceable>, --native-sysroot=<replaceable>NATIVE_SYSROOT</replaceable> + The path to the native sysroot containing the tools to use + to build the image. - -s, --skip-build-check - Skips the build check. + -s, --skip-build-check + Skips the build check. - -D, --debug - Output debug information. - </literallayout> - <note> - You do not need root privileges to run - <filename>wic</filename>. - In fact, you should not run as root when using the - utility. - </note> - </para> - </section> + -D, --debug + Output debug information. + </literallayout> + <note> + You do not need root privileges to run + Wic. + In fact, you should not run as root when using the + utility. 
+ </note> + </para> + </section> - <section id='cooked-mode'> - <title>Cooked Mode</title> + <section id='cooked-mode'> + <title>Cooked Mode</title> - <para> - The general form of the <filename>wic</filename> command - using Cooked Mode is: - <literallayout class='monospaced'> + <para> + The general form of the <filename>wic</filename> command + using Cooked Mode is: + <literallayout class='monospaced'> $ wic create <replaceable>kickstart_file</replaceable> -e <replaceable>image_name</replaceable> - Where: + Where: - <replaceable>kickstart_file</replaceable> - An OpenEmbedded kickstart file. You can provide your own - custom file or supplied file. + <replaceable>kickstart_file</replaceable> + An OpenEmbedded kickstart file. You can provide your own + custom file or a supplied file. - <replaceable>image_name</replaceable> - Specifies the image built using the OpenEmbedded build - system. - </literallayout> - This form is the simplest and most user-friendly, as it - does not require specifying all individual parameters. - All you need to provide is your own - <filename>.wks</filename> file or one provided with the - release. - </para> + <replaceable>image_name</replaceable> + Specifies the image built using the OpenEmbedded build + system. + </literallayout> + This form is the simplest and most user-friendly, as it + does not require specifying all individual parameters. + All you need to provide is your own + <filename>.wks</filename> file or one provided with the + release. + </para> + </section> </section> - </section> - <section id='using-a-provided-kickstart_file'> - <title>Using an Existing Kickstart File</title> + <section id='using-a-provided-kickstart-file'> + <title>Using an Existing Kickstart File</title> - <para> - If you do not want to create your own - <filename>.wks</filename> file, you can use an existing - file provided by the <filename>wic</filename> installation. - Use the following command to list the available files: - <literallayout class='monospaced'> + <para> + If you do not want to create your own + <filename>.wks</filename> file, you can use an existing + file provided by the Wic installation. + Use the following command to list the available files: + <literallayout class='monospaced'> $ wic list images directdisk Create a 'pcbios' direct disk image mkefidisk Create an EFI disk image - </literallayout> - When you use an existing file, you do not have to use the - <filename>.wks</filename> extension. - Here is an example in Raw Mode that uses the - <filename>directdisk</filename> file: - <literallayout class='monospaced'> + </literallayout> + When you use an existing file, you do not have to use the + <filename>.wks</filename> extension. 
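+                        Before settling on one of these provided files, you can
+                        also find out more about the image it creates by using
+                        the <filename>wic list <replaceable>image</replaceable> help</filename>
+                        form shown earlier, for example (output omitted here):
+                        <literallayout class='monospaced'>
     $ wic list directdisk help
+                        </literallayout>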
+ Here is an example in Raw Mode that uses the + <filename>directdisk</filename> file: + <literallayout class='monospaced'> $ wic create directdisk -r <replaceable>rootfs_dir</replaceable> -b <replaceable>bootimg_dir</replaceable> \ -k <replaceable>kernel_dir</replaceable> -n <replaceable>native_sysroot</replaceable> - </literallayout> - </para> + </literallayout> + </para> - <para> - Here are the actual partition language commands - used in the <filename>mkefidisk.wks</filename> file to generate - an image: - <literallayout class='monospaced'> + <para> + Here are the actual partition language commands + used in the <filename>mkefidisk.wks</filename> file to + generate an image: + <literallayout class='monospaced'> # short-description: Create an EFI disk image # long-description: Creates a partitioned EFI disk image that the user # can directly dd to boot media. @@ -4675,30 +4755,30 @@ part swap --ondisk sda --size 44 --label swap1 --fstype=swap bootloader --timeout=10 --append="rootwait rootfstype=ext3 console=ttyPCH0,115200 console=tty0 vmalloc=256MB snd-hda-intel.enable_msi=0" - </literallayout> - </para> - </section> + </literallayout> + </para> + </section> - <section id='wic-usage-examples'> - <title>Examples</title> + <section id='wic-usage-examples'> + <title>Examples</title> - <para> - This section provides several examples that show how to use - the <filename>wic</filename> utility. - All the examples assume the list of requirements in the - "<link linkend='wic-requirements'>Requirements</link>" section - have been met. - The examples assume the previously generated image is - <filename>core-image-minimal</filename>. - </para> + <para> + This section provides several examples that show how to use + the <filename>wic</filename> utility. + All the examples assume the list of requirements in the + "<link linkend='wic-requirements'>Requirements</link>" + section have been met. + The examples assume the previously generated image is + <filename>core-image-minimal</filename>. + </para> - <section id='generate-an-image-using-a-provided-kickstart-file'> - <title>Generate an Image using an Existing Kickstart File</title> + <section id='generate-an-image-using-a-provided-kickstart-file'> + <title>Generate an Image using an Existing Kickstart File</title> - <para> - This example runs in Cooked Mode and uses the - <filename>mkefidisk</filename> kickstart file: - <literallayout class='monospaced'> + <para> + This example runs in Cooked Mode and uses the + <filename>mkefidisk</filename> kickstart file: + <literallayout class='monospaced'> $ wic create mkefidisk -e core-image-minimal Checking basic build environment... Done. @@ -4714,114 +4794,115 @@ KERNEL_DIR: /home/trz/yocto/yocto-image/build/tmp/sysroots/minnow/usr/src/kernel NATIVE_SYSROOT: /home/trz/yocto/yocto-image/build/tmp/sysroots/x86_64-linux - The image(s) were created using OE kickstart file: /home/trz/yocto/yocto-image/scripts/lib/image/canned-wks/mkefidisk.wks - </literallayout> - This example shows the easiest way to create an image - by running in Cooked Mode and using the - <filename>-e</filename> option with an existing kickstart - file. - All that is necessary is to specify the image used to - generate the artifacts. - Your <filename>local.conf</filename> needs to have the - <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> - variable set to the machine you are using, which is - "minnow" in this example. 
- </para> + </literallayout> + The previous example shows the easiest way to create + an image by running in Cooked Mode and using the + <filename>-e</filename> option with an existing + kickstart file. + All that is necessary is to specify the image used to + generate the artifacts. + Your <filename>local.conf</filename> needs to have the + <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> + variable set to the machine you are using, which is + "minnow" in this example. + </para> - <para> - The output specifies the exact image created as well as - where it was created. - The output also names the artifacts used and the exact - <filename>.wks</filename> script that was used to generate - the image. - <note> - You should always verify the details provided in the - output to make sure that the image was indeed created - exactly as expected. - </note> - </para> + <para> + The output specifies the exact image created as well as + where it was created. + The output also names the artifacts used and the exact + <filename>.wks</filename> script that was used to + generate the image. + <note> + You should always verify the details provided in the + output to make sure that the image was indeed + created exactly as expected. + </note> + </para> - <para> - Continuing with the example, you can now directly - <filename>dd</filename> the image to a USB stick, or - whatever media for which you built your image, - and boot the resulting media: - <literallayout class='monospaced'> + <para> + Continuing with the example, you can now directly + <filename>dd</filename> the image to a USB stick, or + whatever media for which you built your image, + and boot the resulting media: + <literallayout class='monospaced'> $ sudo dd if=/var/tmp/wic/build/mkefidisk-201310230946-sda.direct of=/dev/sdb [sudo] password for trz: 182274+0 records in 182274+0 records out 93324288 bytes (93 MB) copied, 14.4777 s, 6.4 MB/s - [trz@empanada ~]$ sudo eject /dev/sdb - </literallayout> - </para> - </section> + [trz at empanada ~]$ sudo eject /dev/sdb + </literallayout> + </para> + </section> - <section id='using-a-modified-kickstart-file'> - <title>Using a Modified Kickstart File</title> + <section id='using-a-modified-kickstart-file'> + <title>Using a Modified Kickstart File</title> - <para> - Because <filename>wic</filename> image creation is driven - by the kickstart file, it is easy to affect image creation - by changing the parameters in the file. - This next example demonstrates that through modification - of the <filename>directdisk</filename> kickstart file. - </para> + <para> + Because partitioned image creation is + driven by the kickstart file, it is easy to affect + image creation by changing the parameters in the file. + This next example demonstrates that through modification + of the <filename>directdisk</filename> kickstart file. + </para> - <para> - As mentioned earlier, you can use the command - <filename>wic list images</filename> to show the list - of existing kickstart files. - The directory in which these files reside is - <filename>scripts/lib/image/canned-wks/</filename> - located in the - <link linkend='source-directory'>Source Directory</link>. - Because the available files reside in this directory, you - can create and add your own custom files to the directory. - Subsequent use of the <filename>wic list images</filename> - command would then include your kickstart files. 
- </para> + <para> + As mentioned earlier, you can use the command + <filename>wic list images</filename> to show the list + of existing kickstart files. + The directory in which these files reside is + <filename>scripts/lib/image/canned-wks/</filename> + located in the + <link linkend='source-directory'>Source Directory</link>. + Because the available files reside in this directory, + you can create and add your own custom files to the + directory. + Subsequent use of the + <filename>wic list images</filename> command would then + include your kickstart files. + </para> - <para> - In this example, the existing - <filename>directdisk</filename> file already does most - of what is needed. - However, for the hardware in this example, the image will - need to boot from <filename>sdb</filename> instead of - <filename>sda</filename>, which is what the - <filename>directdisk</filename> kickstart file uses. - </para> + <para> + In this example, the existing + <filename>directdisk</filename> file already does most + of what is needed. + However, for the hardware in this example, the image + will need to boot from <filename>sdb</filename> instead + of <filename>sda</filename>, which is what the + <filename>directdisk</filename> kickstart file uses. + </para> - <para> - The example begins by making a copy of the - <filename>directdisk.wks</filename> file in the - <filename>scripts/lib/image/canned-wks</filename> - directory and then changing the lines that specify the - target disk from which to boot. - <literallayout class='monospaced'> + <para> + The example begins by making a copy of the + <filename>directdisk.wks</filename> file in the + <filename>scripts/lib/image/canned-wks</filename> + directory and then by changing the lines that specify + the target disk from which to boot. + <literallayout class='monospaced'> $ cp /home/trz/yocto/yocto-image/scripts/lib/image/canned-wks/directdisk.wks \ /home/trz/yocto/yocto-image/scripts/lib/image/canned-wks/directdisksdb.wks - </literallayout> - Next, the example modifies the - <filename>directdisksdb.wks</filename> file and changes all - instances of "<filename>--ondisk sda</filename>" - to "<filename>--ondisk sdb</filename>". - The example changes the following two lines and leaves the - remaining lines untouched: - <literallayout class='monospaced'> + </literallayout> + Next, the example modifies the + <filename>directdisksdb.wks</filename> file and changes + all instances of "<filename>--ondisk sda</filename>" + to "<filename>--ondisk sdb</filename>". + The example changes the following two lines and leaves + the remaining lines untouched: + <literallayout class='monospaced'> part /boot --source bootimg-pcbios --ondisk sdb --label boot --active --align 1024 part / --source rootfs --ondisk sdb --fstype=ext3 --label platform --align 1024 - </literallayout> - Once the lines are changed, the example generates the - <filename>directdisksdb</filename> image. - The command points the process at the - <filename>core-image-minimal</filename> artifacts for the - Next Unit of Computing (nuc) - <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> - the <filename>local.conf</filename>. - <literallayout class='monospaced'> + </literallayout> + Once the lines are changed, the example generates the + <filename>directdisksdb</filename> image. 
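+                    As an alternative to editing the copy by hand, the same
+                    substitution could be made with GNU
+                    <filename>sed</filename>, for example (a sketch that
+                    reuses the path from the earlier copy step):
+                    <literallayout class='monospaced'>
     $ sed -i 's/--ondisk sda/--ondisk sdb/g' \
           /home/trz/yocto/yocto-image/scripts/lib/image/canned-wks/directdisksdb.wks
+                    </literallayout>
+                    Either way, the <filename>wic create</filename> command
+                    shown next performs the actual image generation.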
+ The command points the process at the + <filename>core-image-minimal</filename> artifacts for + the Next Unit of Computing (nuc) + <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> + the <filename>local.conf</filename>. + <literallayout class='monospaced'> $ wic create directdisksdb -e core-image-minimal Checking basic build environment... Done. @@ -4832,39 +4913,39 @@ /var/tmp/wic/build/directdisksdb-201310231131-sdb.direct The following build artifacts were used to create the image(s): + ROOTFS_DIR: /home/trz/yocto/yocto-image/build/tmp/work/nuc-poky-linux/core-image-minimal/1.0-r0/rootfs BOOTIMG_DIR: /home/trz/yocto/yocto-image/build/tmp/sysroots/nuc/usr/share KERNEL_DIR: /home/trz/yocto/yocto-image/build/tmp/sysroots/nuc/usr/src/kernel NATIVE_SYSROOT: /home/trz/yocto/yocto-image/build/tmp/sysroots/x86_64-linux - The image(s) were created using OE kickstart file: /home/trz/yocto/yocto-image/scripts/lib/image/canned-wks/directdisksdb.wks - </literallayout> - Continuing with the example, you can now directly - <filename>dd</filename> the image to a USB stick, or - whatever media for which you built your image, - and boot the resulting media: - <literallayout class='monospaced'> + </literallayout> + Continuing with the example, you can now directly + <filename>dd</filename> the image to a USB stick, or + whatever media for which you built your image, + and boot the resulting media: + <literallayout class='monospaced'> $ sudo dd if=/var/tmp/wic/build/directdisksdb-201310231131-sdb.direct of=/dev/sdb 86018+0 records in 86018+0 records out 44041216 bytes (44 MB) copied, 13.0734 s, 3.4 MB/s - [trz@empanada tmp]$ sudo eject /dev/sdb - </literallayout> - </para> - </section> + [trz at empanada tmp]$ sudo eject /dev/sdb + </literallayout> + </para> + </section> - <section id='creating-an-image-based-on-core-image-minimal-and-crownbay-noemgd'> - <title>Creating an Image Based on <filename>core-image-minimal</filename> and <filename>crownbay-noemgd</filename></title> + <section id='creating-an-image-based-on-core-image-minimal-and-crownbay-noemgd'> + <title>Creating an Image Based on <filename>core-image-minimal</filename> and <filename>crownbay-noemgd</filename></title> - <para> - This example creates an image based on - <filename>core-image-minimal</filename> and a - <filename>crownbay-noemgd</filename> - <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> - that works right out of the box. - <literallayout class='monospaced'> + <para> + This example creates an image based on + <filename>core-image-minimal</filename> and a + <filename>crownbay-noemgd</filename> + <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> + that works right out of the box. + <literallayout class='monospaced'> $ wic create directdisk -e core-image-minimal Checking basic build environment... @@ -4884,21 +4965,21 @@ The image(s) were created using OE kickstart file: /home/trz/yocto/yocto-image/scripts/lib/image/canned-wks/directdisk.wks - </literallayout> - </para> - </section> + </literallayout> + </para> + </section> - <section id='using-a-modified-kickstart-file-and-running-in-raw-mode'> - <title>Using a Modified Kickstart File and Running in Raw Mode</title> + <section id='using-a-modified-kickstart-file-and-running-in-raw-mode'> + <title>Using a Modified Kickstart File and Running in Raw Mode</title> - <para> - This next example manually specifies each build artifact - (runs in Raw Mode) and uses a modified kickstart file. 
- The example also uses the <filename>-o</filename> option - to cause <filename>wic</filename> to create the output - somewhere other than the default - <filename>/var/tmp/wic</filename> directory: - <literallayout class='monospaced'> + <para> + This next example manually specifies each build artifact + (runs in Raw Mode) and uses a modified kickstart file. + The example also uses the <filename>-o</filename> option + to cause Wic to create the output + somewhere other than the default + <filename>/var/tmp/wic</filename> directory: + <literallayout class='monospaced'> $ wic create ~/test.wks -o /home/trz/testwic --rootfs-dir \ /home/trz/yocto/yocto-image/build/tmp/work/crownbay_noemgd-poky-linux/core-image-minimal/1.0-r0/rootfs \ --bootimg-dir /home/trz/yocto/yocto-image/build/tmp/sysroots/crownbay-noemgd/usr/share \ @@ -4919,441 +5000,555 @@ The image(s) were created using OE kickstart file: /home/trz/test.wks - </literallayout> - For this example, - <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> - did not have to be specified in the - <filename>local.conf</filename> file since the artifact is - manually specified. - </para> + </literallayout> + For this example, + <ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> + did not have to be specified in the + <filename>local.conf</filename> file since the + artifact is manually specified. + </para> + </section> </section> - </section> - <section id='openembedded-kickstart-plugins'> - <title>Plug-ins</title> + <section id='openembedded-kickstart-plugins'> + <title>Plug-ins</title> - <para> - Plug-ins allow <filename>wic</filename> functionality to - be extended and specialized by users. - This section documents the plugin interface, which is - currently restricted to source plug ins. - </para> + <para> + Plug-ins allow Wic functionality to + be extended and specialized by users. + This section documents the plug-in interface, which is + currently restricted to source plug-ins. + </para> - <para> - Source plug ins provide a mechanism to customize - various aspects of the image generation process in - <filename>wic</filename>, mainly the contents of - partitions. - The plug ins provide a mechanism for mapping values - specified in <filename>.wks</filename> files using the - <filename>--source</filename> keyword to a - particular plugin implementation that populates a - corresponding partition. - </para> + <para> + Source plug-ins provide a mechanism to customize + various aspects of the image generation process in + Wic, mainly the contents of + partitions. + The plug-ins provide a mechanism for mapping values + specified in <filename>.wks</filename> files using the + <filename>--source</filename> keyword to a + particular plug-in implementation that populates a + corresponding partition. + </para> - <para> - A source plugin is created as a subclass of - <filename>SourcePlugin</filename>. - The plugin file containing it is added to - <filename>scripts/lib/wic/plugins/source/</filename> to - make the plugin implementation available to the - <filename>wic</filename> implementation. - For more information, see - <filename>scripts/lib/wic/pluginbase.py</filename>. - </para> + <para> + A source plug-in is created as a subclass of + <filename>SourcePlugin</filename>. + The plug-in file containing it is added to + <filename>scripts/lib/wic/plugins/source/</filename> to + make the plug-in implementation available to the + Wic implementation. 
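+                For illustration only, a minimal plug-in module might look
+                like the following sketch.
+                The module name, class name, and source name ("mydata") are
+                hypothetical, and the hook argument lists are elided rather
+                than reproduced here; the method descriptions later in this
+                section explain what each hook is expected to do:
+                <literallayout class='monospaced'>
     # scripts/lib/wic/plugins/source/mydata.py  (illustrative only)
     from wic.pluginbase import SourcePlugin

     class MyDataPlugin(SourcePlugin):
         """Populate partitions declared with '--source mydata' in a .wks file."""

         name = 'mydata'   # matched against the --source value in the .wks file

         @classmethod
         def do_configure_partition(cls, part, *args, **kwargs):
             # Optional hook: create configuration files for the partition
             # before its contents are prepared (full argument list omitted).
             pass

         @classmethod
         def do_prepare_partition(cls, part, *args, **kwargs):
             # Produce the final partition image that is assembled into the
             # disk image (full argument list omitted).
             pass
+                </literallayout>
+                A <filename>.wks</filename> partition line would then select
+                this plug-in with <filename>--source mydata</filename>.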
+ For more information, see + <filename>scripts/lib/wic/pluginbase.py</filename>. + </para> - <para> - Source plugins can also be implemented and added by - external layers. - As such, any plugins found in a - <filename>scripts/lib/wic/plugins/source/</filename> - directory in an external layer are also made - available. - </para> + <para> + Source plug-ins can also be implemented and added by + external layers. + As such, any plug-ins found in a + <filename>scripts/lib/wic/plugins/source/</filename> + directory in an external layer are also made + available. + </para> - <para> - When the <filename>wic</filename> implementation needs - to invoke a partition-specific implementation, it looks - for the plugin that has the same name as the - <filename>--source</filename> parameter given to - that partition. - For example, if the partition is set up as follows: - <literallayout class='monospaced'> + <para> + When the Wic implementation needs + to invoke a partition-specific implementation, it looks + for the plug-in that has the same name as the + <filename>--source</filename> parameter given to + that partition. + For example, if the partition is set up as follows: + <literallayout class='monospaced'> part /boot --source bootimg-pcbios ... - </literallayout> - The methods defined as class members of the plugin - having the matching <filename>bootimg-pcbios.name</filename> - class member are used. - </para> + </literallayout> + The methods defined as class members of the plug-in + having the matching <filename>bootimg-pcbios.name</filename> + class member are used. + </para> - <para> - To be more concrete, here is the plugin definition that - matches a - <filename>--source bootimg-pcbios</filename> usage, - along with an example - method called by the <filename>wic</filename> implementation - when it needs to invoke an implementation-specific - partition-preparation function: - <literallayout class='monospaced'> + <para> + To be more concrete, here is the plug-in definition that + matches a + <filename>--source bootimg-pcbios</filename> usage, + along with an example + method called by the Wic implementation + when it needs to invoke an implementation-specific + partition-preparation function: + <literallayout class='monospaced'> class BootimgPcbiosPlugin(SourcePlugin): name = 'bootimg-pcbios' @classmethod def do_prepare_partition(self, part, ...) - </literallayout> - If the subclass itself does not implement a function, a - default version in a superclass is located and - used, which is why all plugins must be derived from - <filename>SourcePlugin</filename>. - </para> - - <para> - The <filename>SourcePlugin</filename> class defines the - following methods, which is the current set of methods - that can be implemented or overridden by - <filename>--source</filename> plugins. - Any methods not implemented by a - <filename>SourcePlugin</filename> subclass inherit the - implementations present in the - <filename>SourcePlugin</filename> class. - For more information, see the - <filename>SourcePlugin</filename> source for details: - </para> - - <para> - <itemizedlist> - <listitem><para><emphasis><filename>do_prepare_partition()</filename>:</emphasis> - Called to do the actual content population for a - partition. - In other words, the method prepares the final - partition image that is incorporated into the - disk image. - </para></listitem> - <listitem><para><emphasis><filename>do_configure_partition()</filename>:</emphasis> - Called before - <filename>do_prepare_partition()</filename>. 
- This method is typically used to create custom - configuration files for a partition (e.g. syslinux or - grub configuration files). - </para></listitem> - <listitem><para><emphasis><filename>do_install_disk()</filename>:</emphasis> - Called after all partitions have been prepared and - assembled into a disk image. - This method provides a hook to allow finalization of a - disk image, (e.g. writing an MBR). - </para></listitem> - <listitem><para><emphasis><filename>do_stage_partition()</filename>:</emphasis> - Special content-staging hook called before - <filename>do_prepare_partition()</filename>. - This method is normally empty.</para> - <para>Typically, a partition just uses the passed-in - parameters (e.g. the unmodified value of - <filename>bootimg_dir</filename>). - However, in some cases things might need to be - more tailored. - As an example, certain files might additionally - need to be taken from - <filename>bootimg_dir + /boot</filename>. - This hook allows those files to be staged in a - customized fashion. - <note> - <filename>get_bitbake_var()</filename> - allows you to access non-standard variables - that you might want to use for this. - </note> - </para></listitem> - </itemizedlist> - </para> - - <para> - This scheme is extensible. - Adding more hooks is a simple matter of adding more - plugin methods to <filename>SourcePlugin</filename> and - derived classes. - The code that then needs to call the plugin methods uses - <filename>plugin.get_source_plugin_methods()</filename> - to find the method or methods needed by the call. - Retrieval of those methods is accomplished - by filling up a dict with keys - containing the method names of interest. - On success, these will be filled in with the actual - methods. - Please see the <filename>wic</filename> - implementation for examples and details. - </para> - </section> - - <section id='openembedded-kickstart-wks-reference'> - <title>OpenEmbedded Kickstart (.wks) Reference</title> - - <para> - The current <filename>wic</filename> implementation supports - only the basic kickstart partitioning commands: - <filename>partition</filename> (or <filename>part</filename> - for short) and <filename>bootloader</filename>. - <note> - Future updates will implement more commands and options. - If you use anything that is not specifically - supported, results can be unpredictable. - </note> - </para> - - <para> - The following is a list of the commands, their syntax, - and meanings. - The commands are based on the Fedora - kickstart versions but with modifications to - reflect <filename>wic</filename> capabilities. 
- You can see the original documentation for those commands - at the following links: - <itemizedlist> - <listitem><para> - <ulink url='http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition'>http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition</ulink> - </para></listitem> - <listitem><para> - <ulink url='http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader'>http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader</ulink> - </para></listitem> - </itemizedlist> - </para> - - <section id='command-part-or-partition'> - <title>Command: part or partition</title> - - <para> - Either of these commands create a partition on the system - and uses the following syntax: - <literallayout class='monospaced'> - part [<replaceable>mntpoint</replaceable>] - partition [<replaceable>mntpoint</replaceable>] </literallayout> - If you do not provide - <replaceable>mntpoint</replaceable>, wic creates a partition - but does not mount it. - </para> - - <para> - The <filename><replaceable>mntpoint</replaceable></filename> - is where the - partition will be mounted and must be of one of the - following forms: - <itemizedlist> - <listitem><para><filename>/<replaceable>path</replaceable></filename>: - For example, <filename>/</filename>, - <filename>/usr</filename>, or - <filename>/home</filename></para></listitem> - <listitem><para><filename>swap</filename>: - The created partition is used as swap space. - </para></listitem> - </itemizedlist> + If the subclass itself does not implement a function, a + default version in a superclass is located and + used, which is why all plug-ins must be derived from + <filename>SourcePlugin</filename>. </para> <para> - Specifying a <replaceable>mntpoint</replaceable> causes - the partition to automatically be mounted. - Wic achieves this by adding entries to the filesystem - table (fstab) during image generation. - In order for wic to generate a valid fstab, you must - also provide one of the <filename>--ondrive</filename>, - <filename>--ondisk</filename>, or - <filename>--use-uuid</filename> partition options as part - of the command. - Here is an example using "/" as the mountpoint. - The command uses "--ondisk" to force the partition onto - the <filename>sdb</filename> disk: - <literallayout class='monospaced'> - part / --source rootfs --ondisk sdb --fstype=ext3 --label platform --align 1024 - </literallayout> + The <filename>SourcePlugin</filename> class defines the + following methods, which is the current set of methods + that can be implemented or overridden by + <filename>--source</filename> plug-ins. + Any methods not implemented by a + <filename>SourcePlugin</filename> subclass inherit the + implementations present in the + <filename>SourcePlugin</filename> class. + For more information, see the + <filename>SourcePlugin</filename> source for details: </para> <para> - Here is a list that describes other supported options you - can use with the <filename>part</filename> and - <filename>partition</filename> commands: <itemizedlist> - <listitem><para><emphasis><filename>--size</filename>:</emphasis> - The minimum partition size in MBytes. - Specify an integer value such as 500. - Do not append the number with "MB". - You do not need this option if you use - <filename>--source</filename>.</para></listitem> - <listitem><para><emphasis><filename>--source</filename>:</emphasis> - This option is a - <filename>wic</filename>-specific option that - names the source of the data that populates - the partition. 
- The most common value for this option is - "rootfs", but you can use any value that maps to - a valid source plugin. - For information on the source plugins, see the - "<link linkend='openembedded-kickstart-plugins'>Plugins</link>" - section.</para> - <para>If you use - <filename>--source rootfs</filename>, - <filename>wic</filename> creates a partition as - large as needed and to fill it with the contents of - the root filesystem pointed to by the - <filename>-r</filename> command-line option - or the equivalent rootfs derived from the - <filename>-e</filename> command-line - option. - The filesystem type used to create the - partition is driven by the value of the - <filename>--fstype</filename> option - specified for the partition. - See the entry on - <filename>--fstype</filename> that - follows for more information. - </para> - <para>If you use - <filename>--source <replaceable>plugin-name</replaceable></filename>, - <filename>wic</filename> creates a partition as - large as needed and fills it with the contents of - the partition that is generated by the - specified plugin name using the data pointed - to by the <filename>-r</filename> command-line - option or the equivalent rootfs derived from the - <filename>-e</filename> command-line - option. - Exactly what those contents and filesystem type end - up being are dependent on the given plugin - implementation. - </para> - <para>If you do not use the - <filename>--source</filename> option, the - <filename>wic</filename> command creates an empty + <listitem><para> + <emphasis><filename>do_prepare_partition()</filename>:</emphasis> + Called to do the actual content population for a partition. - Consequently, you must use the - <filename>--size</filename> option to specify the - size of the empty partition. + In other words, the method prepares the final + partition image that is incorporated into the + disk image. </para></listitem> - <listitem><para><emphasis><filename>--ondisk</filename> or <filename>--ondrive</filename>:</emphasis> - Forces the partition to be created on a particular - disk.</para></listitem> - <listitem><para><emphasis><filename>--fstype</filename>:</emphasis> - Sets the file system type for the partition. - Valid values are: - <itemizedlist> - <listitem><para><filename>ext4</filename> - </para></listitem> - <listitem><para><filename>ext3</filename> - </para></listitem> - <listitem><para><filename>ext2</filename> - </para></listitem> - <listitem><para><filename>btrfs</filename> - </para></listitem> - <listitem><para><filename>squashfs</filename> - </para></listitem> - <listitem><para><filename>swap</filename> - </para></listitem> - </itemizedlist></para></listitem> - <listitem><para><emphasis><filename>--fsoptions</filename>:</emphasis> - Specifies a free-form string of options to be - used when mounting the filesystem. - This string will be copied into the - <filename>/etc/fstab</filename> file of the - installed system and should be enclosed in - quotes. - If not specified, the default string - is "defaults". - </para></listitem> - <listitem><para><emphasis><filename>--label label</filename>:</emphasis> - Specifies the label to give to the filesystem to - be made on the partition. 
- If the given label is already in use by another - filesystem, a new label is created for the - partition.</para></listitem> - <listitem><para><emphasis><filename>--active</filename>:</emphasis> - Marks the partition as active.</para></listitem> - <listitem><para><emphasis><filename>--align (in KBytes)</filename>:</emphasis> - This option is a <filename>wic</filename>-specific - option that says to start a partition on an - x KBytes boundary.</para></listitem> - <listitem><para><emphasis><filename>--no-table</filename>:</emphasis> - This option is a <filename>wic</filename>-specific - option. - Using the option reserves space for the partition - and causes it to become populated. - However, the partition is not added to the - partition table. - </para></listitem> - <listitem><para><emphasis><filename>--extra-space</filename>:</emphasis> - This option is a <filename>wic</filename>-specific - option that adds extra space after the space - filled by the content of the partition. - The final size can go beyond the size specified - by the <filename>--size</filename> option. - The default value is 10 Mbytes. - </para></listitem> - <listitem><para><emphasis><filename>--overhead-factor</filename>:</emphasis> - This option is a <filename>wic</filename>-specific - option that multiplies the size of the partition by - the option's value. - You must supply a value greater than or equal to - "1". - The default value is "1.3". - </para></listitem> - <listitem><para><emphasis><filename>--part-type</filename>:</emphasis> - This option is a <filename>wic</filename>-specific - option that specifies the partition type globally - unique identifier (GUID) for GPT partitions. - You can find the list of partition type GUIDs - at - <ulink url='http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs'></ulink>. + <listitem><para> + <emphasis><filename>do_configure_partition()</filename>:</emphasis> + Called before + <filename>do_prepare_partition()</filename>. + This method is typically used to create custom + configuration files for a partition (e.g. syslinux + or grub configuration files). </para></listitem> - <listitem><para><emphasis><filename>--use-uuid</filename>:</emphasis> - This option is a <filename>wic</filename>-specific - option that causes <filename>wic</filename> to - generate a random GUID for the partition. - The generated identifier is used in the bootloader - configuration to specify the root partition. + <listitem><para> + <emphasis><filename>do_install_disk()</filename>:</emphasis> + Called after all partitions have been prepared and + assembled into a disk image. + This method provides a hook to allow finalization + of a disk image, (e.g. writing an MBR). </para></listitem> - <listitem><para><emphasis><filename>--uuid</filename>:</emphasis> - This option is a <filename>wic</filename>-specific - option that specifies the partition UUID. + <listitem><para> + <emphasis><filename>do_stage_partition()</filename>:</emphasis> + Special content-staging hook called before + <filename>do_prepare_partition()</filename>. + This method is normally empty.</para> + <para>Typically, a partition just uses the passed-in + parameters (e.g. the unmodified value of + <filename>bootimg_dir</filename>). + However, in some cases things might need to be + more tailored. + As an example, certain files might additionally + need to be taken from + <filename>bootimg_dir + /boot</filename>. + This hook allows those files to be staged in a + customized fashion. 
+ <note> + <filename>get_bitbake_var()</filename> + allows you to access non-standard variables + that you might want to use for this. + </note> </para></listitem> </itemizedlist> </para> + + <para> + This scheme is extensible. + Adding more hooks is a simple matter of adding more + plug-in methods to <filename>SourcePlugin</filename> and + derived classes. + The code that then needs to call the plug-in methods uses + <filename>plugin.get_source_plugin_methods()</filename> + to find the method or methods needed by the call. + Retrieval of those methods is accomplished + by filling up a dict with keys + containing the method names of interest. + On success, these will be filled in with the actual + methods. + Please see the Wic + implementation for examples and details. + </para> </section> - <section id='command-bootloader'> - <title>Command: bootloader</title> + <section id='openembedded-kickstart-wks-reference'> + <title>OpenEmbedded Kickstart (<filename>.wks</filename>) Reference</title> <para> - This command specifies how the boot loader should be - configured and supports the following options: + The current Wic implementation supports + only the basic kickstart partitioning commands: + <filename>partition</filename> (or <filename>part</filename> + for short) and <filename>bootloader</filename>. <note> - Bootloader functionality and boot partitions are - implemented by the various - <filename>--source</filename> - plugins that implement bootloader functionality. - The bootloader command essentially provides a means of - modifying bootloader configuration. + Future updates will implement more commands and options. + If you use anything that is not specifically + supported, results can be unpredictable. </note> + </para> + + <para> + The following is a list of the commands, their syntax, + and meanings. + The commands are based on the Fedora + kickstart versions but with modifications to + reflect Wic capabilities. + You can see the original documentation for those commands + at the following links: <itemizedlist> - <listitem><para><emphasis><filename>--timeout</filename>:</emphasis> - Specifies the number of seconds before the - bootloader times out and boots the default option. - </para></listitem> - <listitem><para><emphasis><filename>--append</filename>:</emphasis> - Specifies kernel parameters. - These parameters will be added to the syslinux - <filename>APPEND</filename> or - <filename>grub</filename> kernel command line. + <listitem><para> + <ulink url='http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition'>http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition</ulink> </para></listitem> - <listitem><para><emphasis><filename>--configfile</filename>:</emphasis> - Specifies a user-defined configuration file for - the bootloader. - You can provide a full pathname for the file or - a file that exists in the - <filename>canned-wks</filename> folder. - This option overrides all other bootloader options. 
+ <listitem><para> + <ulink url='http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader'>http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader</ulink> </para></listitem> </itemizedlist> </para> + + <section id='command-part-or-partition'> + <title>Command: part or partition</title> + + <para> + Either of these commands create a partition on the system + and use the following syntax: + <literallayout class='monospaced'> + part [<replaceable>mntpoint</replaceable>] + partition [<replaceable>mntpoint</replaceable>] + </literallayout> + If you do not provide + <replaceable>mntpoint</replaceable>, Wic creates a + partition but does not mount it. + </para> + + <para> + The + <filename><replaceable>mntpoint</replaceable></filename> + is where the partition will be mounted and must be of + one of the following forms: + <itemizedlist> + <listitem><para> + <filename>/<replaceable>path</replaceable></filename>: + For example, <filename>/</filename>, + <filename>/usr</filename>, or + <filename>/home</filename> + </para></listitem> + <listitem><para> + <filename>swap</filename>: + The created partition is used as swap space. + </para></listitem> + </itemizedlist> + </para> + + <para> + Specifying a <replaceable>mntpoint</replaceable> causes + the partition to automatically be mounted. + Wic achieves this by adding entries to the filesystem + table (fstab) during image generation. + In order for wic to generate a valid fstab, you must + also provide one of the <filename>--ondrive</filename>, + <filename>--ondisk</filename>, or + <filename>--use-uuid</filename> partition options as + part of the command. + Here is an example using "/" as the mountpoint. + The command uses "--ondisk" to force the partition onto + the <filename>sdb</filename> disk: + <literallayout class='monospaced'> + part / --source rootfs --ondisk sdb --fstype=ext3 --label platform --align 1024 + </literallayout> + </para> + + <para> + Here is a list that describes other supported options + you can use with the <filename>part</filename> and + <filename>partition</filename> commands: + <itemizedlist> + <listitem><para> + <emphasis><filename>--size</filename>:</emphasis> + The minimum partition size in MBytes. + Specify an integer value such as 500. + Do not append the number with "MB". + You do not need this option if you use + <filename>--source</filename>. + </para></listitem> + <listitem><para> + <emphasis><filename>--source</filename>:</emphasis> + This option is a + Wic-specific option that + names the source of the data that populates + the partition. + The most common value for this option is + "rootfs", but you can use any value that maps to + a valid source plug-in. + For information on the source plug-ins, see the + "<link linkend='openembedded-kickstart-plugins'>Plug-ins</link>" + section.</para> + <para>If you use + <filename>--source rootfs</filename>, + Wic creates a partition as + large as needed and to fill it with the contents + of the root filesystem pointed to by the + <filename>-r</filename> command-line option + or the equivalent rootfs derived from the + <filename>-e</filename> command-line + option. + The filesystem type used to create the + partition is driven by the value of the + <filename>--fstype</filename> option + specified for the partition. + See the entry on + <filename>--fstype</filename> that + follows for more information. 
+ </para> + <para>If you use + <filename>--source <replaceable>plugin-name</replaceable></filename>, + Wic creates a partition as + large as needed and fills it with the contents + of the partition that is generated by the + specified plug-in name using the data pointed + to by the <filename>-r</filename> command-line + option or the equivalent rootfs derived from the + <filename>-e</filename> command-line + option. + Exactly what those contents and filesystem type + end up being are dependent on the given plug-in + implementation. + </para> + <para>If you do not use the + <filename>--source</filename> option, the + <filename>wic</filename> command creates an + empty partition. + Consequently, you must use the + <filename>--size</filename> option to specify + the size of the empty partition. + </para></listitem> + <listitem><para> + <emphasis><filename>--ondisk</filename> or <filename>--ondrive</filename>:</emphasis> + Forces the partition to be created on a + particular disk. + </para></listitem> + <listitem><para> + <emphasis><filename>--fstype</filename>:</emphasis> + Sets the file system type for the partition. + Valid values are: + <itemizedlist> + <listitem><para><filename>ext4</filename> + </para></listitem> + <listitem><para><filename>ext3</filename> + </para></listitem> + <listitem><para><filename>ext2</filename> + </para></listitem> + <listitem><para><filename>btrfs</filename> + </para></listitem> + <listitem><para><filename>squashfs</filename> + </para></listitem> + <listitem><para><filename>swap</filename> + </para></listitem> + </itemizedlist> + </para></listitem> + <listitem><para> + <emphasis><filename>--fsoptions</filename>:</emphasis> + Specifies a free-form string of options to be + used when mounting the filesystem. + This string will be copied into the + <filename>/etc/fstab</filename> file of the + installed system and should be enclosed in + quotes. + If not specified, the default string + is "defaults". + </para></listitem> + <listitem><para> + <emphasis><filename>--label label</filename>:</emphasis> + Specifies the label to give to the filesystem to + be made on the partition. + If the given label is already in use by another + filesystem, a new label is created for the + partition. + </para></listitem> + <listitem><para> + <emphasis><filename>--active</filename>:</emphasis> + Marks the partition as active. + </para></listitem> + <listitem><para> + <emphasis><filename>--align (in KBytes)</filename>:</emphasis> + This option is a + Wic-specific option that + says to start a partition on an + <replaceable>x</replaceable> KBytes + boundary.</para></listitem> + <listitem><para> + <emphasis><filename>--no-table</filename>:</emphasis> + This option is a + Wic-specific option. + Using the option reserves space for the + partition and causes it to become populated. + However, the partition is not added to the + partition table. + </para></listitem> + <listitem><para> + <emphasis><filename>--extra-space</filename>:</emphasis> + This option is a + Wic-specific option that + adds extra space after the space filled by the + content of the partition. + The final size can go beyond the size specified + by the <filename>--size</filename> option. + The default value is 10 Mbytes. + </para></listitem> + <listitem><para> + <emphasis><filename>--overhead-factor</filename>:</emphasis> + This option is a + Wic-specific option that + multiplies the size of the partition by the + option's value. + You must supply a value greater than or equal to + "1". + The default value is "1.3". 
+ </para></listitem> + <listitem><para> + <emphasis><filename>--part-type</filename>:</emphasis> + This option is a + Wic-specific option that + specifies the partition type globally + unique identifier (GUID) for GPT partitions. + You can find the list of partition type GUIDs + at + <ulink url='http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs'></ulink>. + </para></listitem> + <listitem><para> + <emphasis><filename>--use-uuid</filename>:</emphasis> + This option is a + Wic-specific option that + causes Wic to generate a + random GUID for the partition. + The generated identifier is used in the + bootloader configuration to specify the root + partition. + </para></listitem> + <listitem><para> + <emphasis><filename>--uuid</filename>:</emphasis> + This option is a + Wic-specific + option that specifies the partition UUID. + </para></listitem> + </itemizedlist> + </para> + </section> + + <section id='command-bootloader'> + <title>Command: bootloader</title> + + <para> + This command specifies how the bootloader should be + configured and supports the following options: + <note> + Bootloader functionality and boot partitions are + implemented by the various + <filename>--source</filename> + plug-ins that implement bootloader functionality. + The bootloader command essentially provides a + means of modifying bootloader configuration. + </note> + <itemizedlist> + <listitem><para> + <emphasis><filename>--timeout</filename>:</emphasis> + Specifies the number of seconds before the + bootloader times out and boots the default + option. + </para></listitem> + <listitem><para> + <emphasis><filename>--append</filename>:</emphasis> + Specifies kernel parameters. + These parameters will be added to the syslinux + <filename>APPEND</filename> or + <filename>grub</filename> kernel command line. + </para></listitem> + <listitem><para> + <emphasis><filename>--configfile</filename>:</emphasis> + Specifies a user-defined configuration file for + the bootloader. + You can provide a full pathname for the file or + a file that exists in the + <filename>canned-wks</filename> folder. + This option overrides all other bootloader + options. + </para></listitem> + </itemizedlist> + </para> + </section> </section> </section> </section> + <section id='building-an-initramfs-image'> + <title>Building an Initial RAM Filesystem (initramfs) Image</title> + + <para> + initramfs is the successor of Initial RAM Disk (initrd). + It is a "copy in and out" (cpio) archive of the initial file system + that gets loaded into memory during the Linux startup process. + Because Linux uses the contents of the archive during + initialization, the initramfs needs to contain all of the device + drivers and tools needed to mount the final root filesystem. 
+ </para> + + <para> + To build an initramfs image and bundle it into the kernel, set the + <ulink url='&YOCTO_DOCS_REF_URL;#var-INITRAMFS_IMAGE_BUNDLE'><filename>INITRAMFS_IMAGE_BUNDLE</filename></ulink> + variable in your <filename>local.conf</filename> file, and set the + <ulink url='&YOCTO_DOCS_REF_URL;#var-INITRAMFS_IMAGE'><filename>INITRAMFS_IMAGE</filename></ulink> + variable in your <filename>machine.conf</filename> file: + <literallayout class='monospaced'> + INITRAMFS_IMAGE_BUNDLE = "1" + INITRAMFS_IMAGE = "<replaceable>image_recipe_name</replaceable>" + </literallayout> + Setting the <filename>INITRAMFS_IMAGE_BUNDLE</filename> + flag causes the initramfs created by the recipe and defined by + <filename>INITRAMFS_IMAGE</filename> to be unpacked into the + <filename>${B}/usr/</filename> directory. + The unpacked initramfs is then passed to the kernel's + <filename>Makefile</filename> using the + <ulink url='&YOCTO_DOCS_REF_URL;#var-CONFIG_INITRAMFS_SOURCE'><filename>CONFIG_INITRAMFS_SOURCE</filename></ulink> + variable, allowing initramfs to be built in to the kernel + normally. + <note> + The preferred method is to use the + <filename>INITRAMFS_IMAGE</filename> variable rather than the + <filename>INITRAMFS_TASK</filename> variable. + Setting <filename>INITRAMFS_TASK</filename> is supported for + backward compatibility. + However, use of this variable has circular dependency + problems. + See the + <ulink url='&YOCTO_DOCS_REF_URL;#var-INITRAMFS_IMAGE_BUNDLE'><filename>INITRAMFS_IMAGE_BUNDLE</filename></ulink> + variable for additional information on these dependency + problems. + </note> + </para> + + <para> + The recipe that <filename>INITRAMFS_IMAGE</filename> + points to must produce a <filename>.cpio.gz</filename>, + <filename>.cpio.tar</filename>, <filename>.cpio.lz4</filename>, + <filename>.cpio.lzma</filename>, or + <filename>.cpio.xz</filename> file. + You can ensure you produce one of these <filename>.cpio.*</filename> + files by setting the + <ulink url='&YOCTO_DOCS_REF_URL;#var-INITRAMFS_FSTYPES'><filename>INITRAMFS_FSTYPES</filename></ulink> + variable in your configuration file to one or more of the above + file types. + <note> + If you add items to the initramfs image by way of its recipe, + you should use + <ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_INSTALL'><filename>PACKAGE_INSTALL</filename></ulink> + rather than + <ulink url='&YOCTO_DOCS_REF_URL;#var-IMAGE_INSTALL'><filename>IMAGE_INSTALL</filename></ulink>. + <filename>PACKAGE_INSTALL</filename> gives more direct control + of what is added to the image as compared to the defaults you + might not necessarily want that are set by the + <ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-image'><filename>image</filename></ulink> + or + <ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-core-image'><filename>core-image</filename></ulink> + classes. + </note> + </para> + </section> + <section id='configuring-the-kernel'> <title>Configuring the Kernel</title> @@ -7499,26 +7694,29 @@ </para> <para> - If a committed change results in changing the package output, - then the value of the PR variable needs to be increased - (or "bumped") as part of that commit. + If a committed change results in changing the package + output, then the value of the PR variable needs to be + increased (or "bumped") as part of that commit. For new recipes you should add the <filename>PR</filename> - variable and set its initial value equal to "r0", which is the default. 
- Even though the default value is "r0", the practice of adding it to a new recipe makes - it harder to forget to bump the variable when you make changes - to the recipe in future. + variable and set its initial value equal to "r0", which is + the default. + Even though the default value is "r0", the practice of + adding it to a new recipe makes it harder to forget to bump + the variable when you make changes to the recipe in future. </para> <para> - If you are sharing a common <filename>.inc</filename> file with multiple recipes, - you can also use the + If you are sharing a common <filename>.inc</filename> file + with multiple recipes, you can also use the <filename><ulink url='&YOCTO_DOCS_REF_URL;#var-INC_PR'>INC_PR</ulink></filename> - variable to ensure that - the recipes sharing the <filename>.inc</filename> file are rebuilt when the + variable to ensure that the recipes sharing the + <filename>.inc</filename> file are rebuilt when the <filename>.inc</filename> file itself is changed. - The <filename>.inc</filename> file must set <filename>INC_PR</filename> - (initially to "r0"), and all recipes referring to it should set <filename>PR</filename> - to "$(INC_PR).0" initially, incrementing the last number when the recipe is changed. + The <filename>.inc</filename> file must set + <filename>INC_PR</filename> (initially to "r0"), and all + recipes referring to it should set <filename>PR</filename> + to "${INC_PR}.0" initially, incrementing the last number + when the recipe is changed. If the <filename>.inc</filename> file is changed then its <filename>INC_PR</filename> should be incremented. </para> @@ -7527,14 +7725,14 @@ When upgrading the version of a package, assuming the <filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PV'>PV</ulink></filename> changes, the <filename>PR</filename> variable should be - reset to "r0" (or "$(INC_PR).0" if you are using + reset to "r0" (or "${INC_PR}.0" if you are using <filename>INC_PR</filename>). </para> <para> Usually, version increases occur only to packages. - However, if for some reason <filename>PV</filename> changes but does not - increase, you can increase the + However, if for some reason <filename>PV</filename> changes + but does not increase, you can increase the <filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PE'>PE</ulink></filename> variable (Package Epoch). The <filename>PE</filename> variable defaults to "0". @@ -7544,7 +7742,8 @@ Version numbering strives to follow the <ulink url='http://www.debian.org/doc/debian-policy/ch-controlfields.html'> Debian Version Field Policy Guidelines</ulink>. - These guidelines define how versions are compared and what "increasing" a version means. + These guidelines define how versions are compared and what + "increasing" a version means. </para> </section> </section> @@ -9520,27 +9719,47 @@ <para> If your image is already built, make sure the following are set - in your <filename>local.conf</filename> file. 
- Be sure to provide the IP address you need: + in your <filename>local.conf</filename> file: <literallayout class='monospaced'> INHERIT +="testexport" - TEST_TARGET_IP = "192.168.7.2" - TEST_SERVER_IP = "192.168.7.1" + TEST_TARGET_IP = "<replaceable>IP-address-for-the-test-target</replaceable>" + TEST_SERVER_IP = "<replaceable>IP-address-for-the-test-server</replaceable>" </literallayout> - You can then export the tests with the following: + You can then export the tests with the following BitBake + command form: <literallayout class='monospaced'> - $ bitbake core-image-sato -c testexport + $ bitbake <replaceable>image</replaceable> -c testexport </literallayout> Exporting the tests places them in the <link linkend='build-directory'>Build Directory</link> in - <filename>tmp/testexport/core-image-sato</filename>, which - is controlled by the + <filename>tmp/testexport/</filename><replaceable>image</replaceable>, + which is controlled by the <filename>TEST_EXPORT_DIR</filename> variable. </para> <para> You can now run the tests outside of the build environment: <literallayout class='monospaced'> + $ cd tmp/testexport/<replaceable>image</replaceable> + $ ./runexported.py testdata.json + </literallayout> + </para> + + <para> + Here is a complete example that shows IP addresses and uses + the <filename>core-image-sato</filename> image: + <literallayout class='monospaced'> + INHERIT +="testexport" + TEST_TARGET_IP = "192.168.7.2" + TEST_SERVER_IP = "192.168.7.1" + </literallayout> + Use BitBake to export the tests: + <literallayout class='monospaced'> + $ bitbake core-image-sato -c testexport + </literallayout> + Run the tests outside of the build environment using the + following: + <literallayout class='monospaced'> $ cd tmp/testexport/core-image-sato $ ./runexported.py testdata.json </literallayout> diff --git a/import-layers/yocto-poky/documentation/dev-manual/dev-manual-start.xml b/import-layers/yocto-poky/documentation/dev-manual/dev-manual-start.xml index 23bf8eb0e..b59f54b08 100644 --- a/import-layers/yocto-poky/documentation/dev-manual/dev-manual-start.xml +++ b/import-layers/yocto-poky/documentation/dev-manual/dev-manual-start.xml @@ -352,6 +352,11 @@ <ulink url='&YOCTO_DOCS_REF_URL;#var-SDKMACHINE'><filename>SDKMACHINE</filename></ulink>). If you are not using an SDK type image, you need to separately download and install the stand-alone Yocto Project cross-toolchain tarball. + See the + "<ulink url='&YOCTO_DOCS_SDK_URL;#sdk-appendix-obtain'>Obtaining the SDK</ulink>" + appendix in the Yocto Project Software Development Kit (SDK) + Developer's Guide for more information on locating and installing + cross-toolchains. 
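+            As an example only, if the toolchain you download is provided as a
+            self-extracting <filename>.sh</filename> installer, installing it
+            amounts to running that file from a shell; the name below is a
+            placeholder whose exact form depends on the release, host
+            architecture, target machine, and image:
+            <literallayout class='monospaced'>
     $ ./poky-glibc-x86_64-<replaceable>image</replaceable>-<replaceable>target_arch</replaceable>-toolchain-<replaceable>release</replaceable>.sh
+            </literallayout>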
</para> <para> diff --git a/import-layers/yocto-poky/documentation/dev-manual/dev-manual.xml b/import-layers/yocto-poky/documentation/dev-manual/dev-manual.xml index 0012aaa3b..2ce1652fc 100644 --- a/import-layers/yocto-poky/documentation/dev-manual/dev-manual.xml +++ b/import-layers/yocto-poky/documentation/dev-manual/dev-manual.xml @@ -91,6 +91,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> diff --git a/import-layers/yocto-poky/documentation/kernel-dev/kernel-dev.xml b/import-layers/yocto-poky/documentation/kernel-dev/kernel-dev.xml index 12828d26c..b96acd6f0 100644 --- a/import-layers/yocto-poky/documentation/kernel-dev/kernel-dev.xml +++ b/import-layers/yocto-poky/documentation/kernel-dev/kernel-dev.xml @@ -76,6 +76,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> diff --git a/import-layers/yocto-poky/documentation/mega-manual/mega-manual.xml b/import-layers/yocto-poky/documentation/mega-manual/mega-manual.xml index c16e92861..157feac31 100644 --- a/import-layers/yocto-poky/documentation/mega-manual/mega-manual.xml +++ b/import-layers/yocto-poky/documentation/mega-manual/mega-manual.xml @@ -60,6 +60,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> @@ -126,6 +136,8 @@ <xi:include xmlns:xi="http://www.w3.org/2003/XInclude" href="../sdk-manual/sdk-appendix-customizing.xml"/> <xi:include + xmlns:xi="http://www.w3.org/2003/XInclude" href="../sdk-manual/sdk-appendix-customizing-standard.xml"/> + <xi:include xmlns:xi="http://www.w3.org/2003/XInclude" href="../sdk-manual/sdk-appendix-mars.xml"/> <!-- Includes bsp-guide title image and then bsp-guide chapters --> diff --git a/import-layers/yocto-poky/documentation/poky.ent b/import-layers/yocto-poky/documentation/poky.ent index b36c234b1..364020792 100644 --- a/import-layers/yocto-poky/documentation/poky.ent +++ b/import-layers/yocto-poky/documentation/poky.ent @@ -1,12 +1,12 @@ -<!ENTITY DISTRO "2.2"> -<!ENTITY DISTRO_COMPRESSED "22"> +<!ENTITY DISTRO "2.2.2"> +<!ENTITY DISTRO_COMPRESSED "222"> <!ENTITY DISTRO_NAME_NO_CAP "morty"> <!ENTITY DISTRO_NAME "Morty"> -<!ENTITY YOCTO_DOC_VERSION "2.2"> -<!ENTITY POKYVERSION "17.0.0"> -<!ENTITY POKYVERSION_COMPRESSED "1700"> +<!ENTITY YOCTO_DOC_VERSION "2.2.2"> +<!ENTITY POKYVERSION "17.0.1"> +<!ENTITY POKYVERSION_COMPRESSED "1702"> <!ENTITY YOCTO_POKY "poky-&DISTRO_NAME_NO_CAP;-&POKYVERSION;"> -<!ENTITY 
COPYRIGHT_YEAR "2010-2016"> +<!ENTITY COPYRIGHT_YEAR "2010-2017"> <!ENTITY YOCTO_DL_URL "http://downloads.yoctoproject.org"> <!ENTITY YOCTO_HOME_URL "http://www.yoctoproject.org"> <!ENTITY YOCTO_LISTS_URL "http://lists.yoctoproject.org"> @@ -61,13 +61,15 @@ <!ENTITY OE_INIT_PATH "&YOCTO_POKY;/oe-init-build-env"> <!ENTITY OE_INIT_FILE "oe-init-build-env"> <!ENTITY UBUNTU_HOST_PACKAGES_ESSENTIAL "gawk wget git-core diffstat unzip texinfo gcc-multilib \ - build-essential chrpath socat"> + build-essential chrpath socat cpio python python3 python3-pip python3-pexpect"> <!ENTITY FEDORA_HOST_PACKAGES_ESSENTIAL "gawk make wget tar bzip2 gzip python3 unzip perl patch \ diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath \ ccache perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue perl-bignum socat \ - findutils which"> + findutils which file cpio python python3-pip python3-pexpect"> <!ENTITY OPENSUSE_HOST_PACKAGES_ESSENTIAL "python gcc gcc-c++ git chrpath make wget python-xml \ - diffstat makeinfo python-curses patch socat"> + diffstat makeinfo python-curses patch socat python3 python3-curses tar python3-pip \ + python3-pexpect"> <!ENTITY CENTOS_HOST_PACKAGES_ESSENTIAL "gawk make wget tar bzip2 gzip python unzip perl patch \ diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath socat \ - perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue"> + perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue python3-pip python3-pexpect"> + diff --git a/import-layers/yocto-poky/documentation/profile-manual/profile-manual.xml b/import-layers/yocto-poky/documentation/profile-manual/profile-manual.xml index 4717906ca..a88934f6e 100644 --- a/import-layers/yocto-poky/documentation/profile-manual/profile-manual.xml +++ b/import-layers/yocto-poky/documentation/profile-manual/profile-manual.xml @@ -76,6 +76,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> diff --git a/import-layers/yocto-poky/documentation/ref-manual/introduction.xml b/import-layers/yocto-poky/documentation/ref-manual/introduction.xml index 90d965f6d..ddf6a860e 100644 --- a/import-layers/yocto-poky/documentation/ref-manual/introduction.xml +++ b/import-layers/yocto-poky/documentation/ref-manual/introduction.xml @@ -168,20 +168,22 @@ <listitem><para>Ubuntu 14.10</para></listitem> <listitem><para>Ubuntu 15.04</para></listitem> <listitem><para>Ubuntu 15.10</para></listitem> + <listitem><para>Ubuntu 16.04</para></listitem> <!-- <listitem><para>Fedora 16 (Verne)</para></listitem> <listitem><para>Fedora 17 (Spherical)</para></listitem> <listitem><para>Fedora release 19 (Schrödinger's Cat)</para></listitem> <listitem><para>Fedora release 20 (Heisenbug)</para></listitem> --> - <listitem><para>Fedora release 21</para></listitem> <listitem><para>Fedora release 22</para></listitem> + <listitem><para>Fedora release 23</para></listitem> + <listitem><para>Fedora release 24</para></listitem> <!-- <listitem><para>CentOS release 5.6 (Final)</para></listitem> <listitem><para>CentOS release 5.7 (Final)</para></listitem> <listitem><para>CentOS release 5.8 (Final)</para></listitem> - <listitem><para>CentOS release 6.3 
(Final)</para></listitem> --> - <listitem><para>CentOS release 6.x</para></listitem> + <listitem><para>CentOS release 6.3 (Final)</para></listitem> + <listitem><para>CentOS release 6.x</para></listitem> --> <listitem><para>CentOS release 7.x</para></listitem> -<!-- <listitem><para>Debian GNU/Linux 6.0 (Squeeze)</para></listitem> --> - <listitem><para>Debian GNU/Linux 7.x (Wheezy)</para></listitem> +<!-- <listitem><para>Debian GNU/Linux 6.0 (Squeeze)</para></listitem> + <listitem><para>Debian GNU/Linux 7.x (Wheezy)</para></listitem> --> <listitem><para>Debian GNU/Linux 8.x (Jessie)</para></listitem> <!-- <listitem><para>Debian GNU/Linux 7.1 (Wheezy)</para></listitem> <listitem><para>Debian GNU/Linux 7.2 (Wheezy)</para></listitem> @@ -195,6 +197,7 @@ <listitem><para>openSUSE 12.3</para></listitem> <listitem><para>openSUSE 13.1</para></listitem> --> <listitem><para>openSUSE 13.2</para></listitem> + <listitem><para>openSUSE 42.1</para></listitem> </itemizedlist> </para> diff --git a/import-layers/yocto-poky/documentation/ref-manual/migration.xml b/import-layers/yocto-poky/documentation/ref-manual/migration.xml index 3e7e6b084..2bdb542ec 100644 --- a/import-layers/yocto-poky/documentation/ref-manual/migration.xml +++ b/import-layers/yocto-poky/documentation/ref-manual/migration.xml @@ -3489,7 +3489,7 @@ <para> <filename>runqemu</filename> has been ported to Python and has changed behavior in some cases. - Previous usage patterns continued to be supported. + Previous usage patterns continue to be supported. </para> <para> @@ -3620,6 +3620,27 @@ $ runqemu qemux86-64 tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64. </para> </section> + <section id='migration-2.2-kernel-image-base-name-no-longer-uses-kernel-imagetype'> + <title><filename>KERNEL_IMAGE_BASE_NAME</filename> no Longer Uses <filename>KERNEL_IMAGETYPE</filename></title> + + <para> + The + <link linkend='var-KERNEL_IMAGE_BASE_NAME'><filename>KERNEL_IMAGE_BASE_NAME</filename></link> + variable no longer uses the + <link linkend='var-KERNEL_IMAGETYPE'><filename>KERNEL_IMAGETYPE</filename></link> + variable to create the image's base name. + Because the OpenEmbedded build system can now build multiple kernel + image types, this part of the kernel image base name as been + removed leaving only the following: + <literallayout class='monospaced'> + KERNEL_IMAGE_BASE_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME} + </literallayout> + If you have recipes or classes that use + <filename>KERNEL_IMAGE_BASE_NAME</filename> directly, you might + need to update the references to ensure they continue to work. + </para> + </section> + <section id='migration-2.2-bitbake-changes'> <title>BitBake Changes</title> diff --git a/import-layers/yocto-poky/documentation/ref-manual/ref-classes.xml b/import-layers/yocto-poky/documentation/ref-manual/ref-classes.xml index 2344a0406..f7b1126d7 100644 --- a/import-layers/yocto-poky/documentation/ref-manual/ref-classes.xml +++ b/import-layers/yocto-poky/documentation/ref-manual/ref-classes.xml @@ -1873,11 +1873,22 @@ </para> <para> - This means that each built kernel module is packaged separately and inter-module - dependencies are created by parsing the <filename>modinfo</filename> output. - If all modules are required, then installing the <filename>kernel-modules</filename> - package installs all packages with modules and various other kernel packages - such as <filename>kernel-vmlinux</filename>. 
+ This means that each built kernel module is packaged separately and + inter-module dependencies are created by parsing the + <filename>modinfo</filename> output. + If all modules are required, then installing the + <filename>kernel-modules</filename> package installs all packages with + modules and various other kernel packages such as + <filename>kernel-vmlinux</filename>. + </para> + + <para> + The <filename>kernel</filename> class contains logic that allows + you to embed an initial RAM filesystem (initramfs) image when + you build the kernel image. + For information on how to build an initramfs, see the + "<ulink url='&YOCTO_DOCS_DEV_URL;#building-an-initramfs-image'>Building an Initial RAM Filesystem (initramfs) Image</ulink>" + section in the Yocto Project Development Manual. </para> <para> diff --git a/import-layers/yocto-poky/documentation/ref-manual/ref-features.xml b/import-layers/yocto-poky/documentation/ref-manual/ref-features.xml index cd1bcb024..282a51719 100644 --- a/import-layers/yocto-poky/documentation/ref-manual/ref-features.xml +++ b/import-layers/yocto-poky/documentation/ref-manual/ref-features.xml @@ -142,6 +142,18 @@ <listitem><para><emphasis>alsa:</emphasis> Include ALSA support (OSS compatibility kernel modules installed if available). </para></listitem> + <listitem><para><emphasis>api-documentation:</emphasis> + Enables generation of API documentation during recipe + builds. + The resulting documentation is added to SDK tarballs + when the + <filename>bitbake -c populate_sdk</filename> command + is used. + See the + "<ulink url='&YOCTO_DOCS_SDK_URL;#adding-api-documentation-to-the-standard-sdk'>Adding API Documentation to the Standard SDK</ulink>" + section in the Yocto Project Software Development Kit (SDK) + Developer's Guide for more information. + </para></listitem> <listitem><para><emphasis>bluetooth:</emphasis> Include bluetooth support (integrated BT only).</para></listitem> <listitem><para><emphasis>bluez5:</emphasis> Include diff --git a/import-layers/yocto-poky/documentation/ref-manual/ref-manual.xml b/import-layers/yocto-poky/documentation/ref-manual/ref-manual.xml index 09f34fb52..47f64769c 100644 --- a/import-layers/yocto-poky/documentation/ref-manual/ref-manual.xml +++ b/import-layers/yocto-poky/documentation/ref-manual/ref-manual.xml @@ -107,6 +107,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> diff --git a/import-layers/yocto-poky/documentation/ref-manual/ref-variables.xml b/import-layers/yocto-poky/documentation/ref-manual/ref-variables.xml index ce331d85b..807e24251 100644 --- a/import-layers/yocto-poky/documentation/ref-manual/ref-variables.xml +++ b/import-layers/yocto-poky/documentation/ref-manual/ref-variables.xml @@ -2273,12 +2273,13 @@ <glossentry id='var-CONFIG_INITRAMFS_SOURCE'><glossterm>CONFIG_INITRAMFS_SOURCE</glossterm> <info> - CONFIG_INITRAMFS_SOURCE[doc] = "Identifies the initial RAM disk (initramfs) source files. The OpenEmbedded build system receives and uses this kernel Kconfig variable as an environment variable." + CONFIG_INITRAMFS_SOURCE[doc] = "Identifies the initial RAM filesystem (initramfs) source files. 
The OpenEmbedded build system receives and uses this kernel Kconfig variable as an environment variable." </info> <glossdef> <para role="glossdeffirst"> <!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> --> - Identifies the initial RAM disk (initramfs) source files. + Identifies the initial RAM filesystem (initramfs) source + files. The OpenEmbedded build system receives and uses this kernel Kconfig variable as an environment variable. By default, the variable is set to null (""). @@ -2304,6 +2305,12 @@ If you specify multiple directories and files, the initramfs image will be the aggregate of all of them. </para> + + <para> + For information on creating an initramfs, see the + "<ulink url='&YOCTO_DOCS_DEV_URL;#building-an-initramfs-image'>Building an Initial RAM Filesystem (initramfs) Image</ulink>" + section in the Yocto Project Development Manual. + </para> </glossdef> </glossentry> @@ -4885,9 +4892,9 @@ is normally the same as the <link linkend='var-TARGET_OS'><filename>TARGET_OS</filename></link>. The variable can be set to "linux" for <filename>glibc</filename>-based systems and - to "linux-uclibc" for <filename>uclibc</filename>. + to "linux-musl" for <filename>musl</filename>. For ARM/EABI targets, there are also "linux-gnueabi" and - "linux-uclibc-gnueabi" values possible. + "linux-musleabi" values possible. </para> </glossdef> </glossentry> @@ -5405,9 +5412,12 @@ variable to specify packages for installation. Instead, use the <link linkend='var-PACKAGE_INSTALL'><filename>PACKAGE_INSTALL</filename></link> - variable, which allows the initial RAM disk (initramfs) - recipe to use a fixed set of packages and not be - affected by <filename>IMAGE_INSTALL</filename>. + variable, which allows the initial RAM filesystem + (initramfs) recipe to use a fixed set of packages and + not be affected by <filename>IMAGE_INSTALL</filename>. + For information on creating an initramfs, see the + "<ulink url='&YOCTO_DOCS_DEV_URL;#building-an-initramfs-image'>Building an Initial RAM Filesystem (initramfs) Image</ulink>" + section in the Yocto Project Development Manual. </note> </para> @@ -6133,13 +6143,13 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <glossentry id='var-INITRAMFS_FSTYPES'><glossterm>INITRAMFS_FSTYPES</glossterm> <info> - INITRAMFS_FSTYPES[doc] = "Defines the format for the output image of an initial RAM disk (initramfs), which is used during boot." + INITRAMFS_FSTYPES[doc] = "Defines the format for the output image of an initial RAM filesystem (initramfs), which is used during boot." </info> <glossdef> <para role="glossdeffirst"> <!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> --> Defines the format for the output image of an initial - RAM disk (initramfs), which is used during boot. + RAM filesystem (initramfs), which is used during boot. Supported formats are the same as those supported by the <link linkend='var-IMAGE_FSTYPES'><filename>IMAGE_FSTYPES</filename></link> variable. @@ -6152,7 +6162,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>Source Directory</ulink>, is "cpio.gz". The Linux kernel's initramfs mechanism, as opposed to the - initial RAM disk + initial RAM filesystem <ulink url='https://en.wikipedia.org/wiki/Initrd'>initrd</ulink> mechanism, expects an optionally compressed cpio archive. 
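To make the PACKAGE_INSTALL note above concrete: an initramfs image recipe pins its own package set with PACKAGE_INSTALL and emits the INITRAMFS_FSTYPES output format (cpio.gz by default, matching what the kernel's initramfs mechanism expects). The sketch below is modelled loosely on core-image-minimal-initramfs and is not a recipe from this tree; the recipe name and package list are illustrative assumptions:

    # my-tiny-initramfs.bb (hypothetical name) -- fixed package set, cpio output
    DESCRIPTION = "Minimal initramfs image sketch"
    LICENSE = "MIT"
    PACKAGE_INSTALL = "busybox base-files"
    IMAGE_FSTYPES = "${INITRAMFS_FSTYPES}"
    IMAGE_LINGUAS = ""
    IMAGE_ROOTFS_SIZE = "8192"
    inherit core-image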
@@ -6162,7 +6172,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <glossentry id='var-INITRAMFS_IMAGE'><glossterm>INITRAMFS_IMAGE</glossterm> <info> - INITRAMFS_IMAGE[doc] = "Specifies the PROVIDES name of an image recipe that is used to build an initial RAM disk (initramfs) image." + INITRAMFS_IMAGE[doc] = "Specifies the PROVIDES name of an image recipe that is used to build an initial RAM filesystem (initramfs) image." </info> <glossdef> <para role="glossdeffirst"> @@ -6170,7 +6180,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" Specifies the <link linkend='var-PROVIDES'><filename>PROVIDES</filename></link> name of an image recipe that is used to build an initial - RAM disk (initramfs) image. + RAM filesystem (initramfs) image. An initramfs provides a temporary root filesystem used for early system initialization (e.g. loading of modules needed to locate and mount the "real" root filesystem). @@ -6211,17 +6221,21 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" </para> <para> - Finally, for more information you can also see the + For more information, you can also see the <link linkend='var-INITRAMFS_IMAGE_BUNDLE'><filename>INITRAMFS_IMAGE_BUNDLE</filename></link> variable, which allows the generated image to be bundled inside the kernel image. + Additionally, for information on creating an initramfs, see + the + "<ulink url='&YOCTO_DOCS_DEV_URL;#building-an-initramfs-image'>Building an Initial RAM Filesystem (initramfs) Image</ulink>" + section in the Yocto Project Development Manual. </para> </glossdef> </glossentry> <glossentry id='var-INITRAMFS_IMAGE_BUNDLE'><glossterm>INITRAMFS_IMAGE_BUNDLE</glossterm> <info> - INITRAMFS_IMAGE_BUNDLE[doc] = "Controls whether or not the image recipe specified by INITRAMFS_IMAGE is run through an extra pass (do_bundle_initramfs) during kernel compilation in order to build a single binary that contains both the kernel image and the initial RAM disk (initramfs)." + INITRAMFS_IMAGE_BUNDLE[doc] = "Controls whether or not the image recipe specified by INITRAMFS_IMAGE is run through an extra pass (do_bundle_initramfs) during kernel compilation in order to build a single binary that contains both the kernel image and the initial RAM filesystem (initramfs)." </info> <glossdef> <para role="glossdeffirst"> @@ -6231,8 +6245,8 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" is run through an extra pass (<link linkend='ref-tasks-bundle_initramfs'><filename>do_bundle_initramfs</filename></link>) during kernel compilation in order to build a single binary - that contains both the kernel image and the initial RAM disk - (initramfs). + that contains both the kernel image and the initial RAM + filesystem (initramfs) image. This makes use of the <link linkend='var-CONFIG_INITRAMFS_SOURCE'><filename>CONFIG_INITRAMFS_SOURCE</filename></link> kernel feature. @@ -6279,6 +6293,9 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" See the <ulink url='&YOCTO_GIT_URL;/cgit/cgit.cgi/poky/tree/meta-poky/conf/local.conf.sample.extended'><filename>local.conf.sample.extended</filename></ulink> file for additional information. + Also, for information on creating an initramfs, see the + "<ulink url='&YOCTO_DOCS_DEV_URL;#building-an-initramfs-image'>Building an Initial RAM Filesystem (initramfs) Image</ulink>" + section in the Yocto Project Development Manual. 
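The local.conf.sample.extended pointer above boils down to two assignments. A minimal local.conf sketch, assuming the stock core-image-minimal-initramfs recipe is acceptable as the bundled image:

    INITRAMFS_IMAGE = "core-image-minimal-initramfs"
    INITRAMFS_IMAGE_BUNDLE = "1"

With these set, the kernel recipe runs the extra do_bundle_initramfs pass and produces a single binary containing both the kernel image and the initramfs.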
</para> </glossdef> </glossentry> @@ -6766,13 +6783,12 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <link linkend='ref-classes-kernel'>kernel</link> class as follows: <literallayout class='monospaced'> - KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}" + KERNEL_IMAGE_BASE_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}" </literallayout> </para> <para> See the - <link linkend='var-KERNEL_IMAGETYPE'><filename>KERNEL_IMAGETYPE</filename></link>, <link linkend='var-PKGE'><filename>PKGE</filename></link>, <link linkend='var-PKGV'><filename>PKGV</filename></link>, <link linkend='var-PKGR'><filename>PKGR</filename></link>, @@ -9106,9 +9122,12 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" the <link linkend='images-core-image-minimal-initramfs'><filename>core-image-minimal-initramfs</filename></link> image. - When working with an initial RAM disk (initramfs) + When working with an initial RAM filesystem (initramfs) image, use the <filename>PACKAGE_INSTALL</filename> variable. + For information on creating an initramfs, see the + "<ulink url='&YOCTO_DOCS_DEV_URL;#building-an-initramfs-image'>Building an Initial RAM Filesystem (initramfs) Image</ulink>" + section in the Yocto Project Development Manual. </para> </glossdef> </glossentry> @@ -10657,7 +10676,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <literallayout class='monospaced'> RDEPENDS_${PN} = "<replaceable>package</replaceable> (<replaceable>operator</replaceable> <replaceable>version</replaceable>)" </literallayout> - For <filename>operator</filename>, you can specify the + For <replaceable>operator</replaceable>, you can specify the following: <literallayout class='monospaced'> = @@ -10666,6 +10685,13 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <= >= </literallayout> + For <replaceable>version</replaceable>, provide the version + number. + <note><title>Tip</title> + You can use + <link linkend='var-EXTENDPKGV'><filename>EXTENDPKGV</filename></link> + to provide a full package version specification. + </note> For example, the following sets up a dependency on version 1.2 or greater of the package <filename>foo</filename>: <literallayout class='monospaced'> @@ -12685,9 +12711,22 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" Specifies the path to the top-level sysroots directory (i.e. <filename>${</filename><link linkend='var-TMPDIR'><filename>TMPDIR</filename></link><filename>}/sysroots</filename>). + </para> + + <para> + <filename>STAGING_DIR</filename> contains the directories + that are staged into the sysroot by the + <link linkend='ref-tasks-populate_sysroot'><filename>do_populate_sysroot</filename></link> + task. + See the + <link linkend='var-SYSROOT_DIRS'><filename>SYSROOT_DIRS</filename></link> + variable and the + "<ulink url='&YOCTO_DOCS_DEV_URL;#new-sharing-files-between-recipes'>Sharing Files Between Recipes</ulink>" + section for more information. <note> Recipes should never write files directly under - this directory because the OpenEmbedded build system + the <filename>STAGING_DIR</filename> directory because + the OpenEmbedded build system manages the directory automatically. 
Instead, files should be installed to <filename>${</filename><link linkend='var-D'><filename>D</filename></link><filename>}</filename> @@ -13731,9 +13770,9 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> --> Specifies the target's operating system. The variable can be set to "linux" for <filename>glibc</filename>-based systems and - to "linux-uclibc" for <filename>uclibc</filename>. + to "linux-musl" for <filename>musl</filename>. For ARM/EABI targets, there are also "linux-gnueabi" and - "linux-uclibc-gnueabi" values possible. + "linux-musleabi" values possible. </para> </glossdef> </glossentry> @@ -13862,7 +13901,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <glossentry id='var-TCLIBC'><glossterm>TCLIBC</glossterm> <info> - TCLIBC[doc] = "Specifies GNU standard C library (libc) variant to use during the build process. You can select 'glibc' or 'uclibc'." + TCLIBC[doc] = "Specifies GNU standard C library (libc) variant to use during the build process. You can select 'glibc' or 'musl'." </info> <glossdef> <para role="glossdeffirst"> @@ -13874,7 +13913,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" </para> <para> - You can select "glibc" or "uclibc". + You can select "glibc" or "musl". </para> </glossdef> </glossentry> @@ -13913,7 +13952,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" <link linkend='var-TCLIBC'><filename>TCLIBC</filename></link>, which controls the variant of the GNU standard C library (<filename>libc</filename>) used during the build process: - <filename>glibc</filename> or <filename>uclibc</filename>. + <filename>glibc</filename> or <filename>musl</filename>. </para> <para> @@ -14419,6 +14458,10 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" </literallayout> In this case, a default list of packages is set in this variable, but you can add additional packages to the list. + See the + "<ulink url='&YOCTO_DOCS_SDK_URL;#sdk-adding-individual-packages'>Adding Individual Packages to the Standard SDK</ulink>" + section in the Yocto Project Software Development Kit (SDK) + Developer's Guide for more information. </para> <para> @@ -14470,6 +14513,12 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" uses when it creates the target part of an SDK (i.e. the part built for the target hardware), which includes libraries and headers. + Use this variable to add individual packages to the + part of the SDK that runs on the target. + See the + "<ulink url='&YOCTO_DOCS_SDK_URL;#sdk-adding-individual-packages'>Adding Individual Packages to the Standard SDK</ulink>" + section in the Yocto Project Software Development Kit (SDK) + Developer's Guide for more information. </para> <para> @@ -15519,6 +15568,26 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" </glossdef> </glossentry> + <glossentry id='var-WKS_FILE'><glossterm>WKS_FILE</glossterm> + <info> + WKS_FILE[doc] = "Specifies the name of the wic kickstart file." + </info> + <glossdef> + <para role="glossdeffirst"> + Specifies the location of the Wic + kickstart file that is used by the OpenEmbedded build + system to create a partitioned image + (<replaceable>image</replaceable><filename>.wic</filename>). + For information on how to create a + partitioned image, see the + "<ulink url='&YOCTO_DOCS_DEV_URL;#creating-wic-images-oe'>Creating Partitioned Images</ulink>" + section. 
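As a sketch of how the new WKS_FILE entry is typically exercised (the canned kickstart filename below is an assumption; any .wks file from your layer or from scripts/lib/wic/canned-wks can be substituted):

    # local.conf or machine configuration sketch
    IMAGE_FSTYPES += "wic"
    WKS_FILE = "mkefidisk.wks"

The build then emits a partitioned <image>.wic alongside the other image types, laid out as the kickstart file describes.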
+ For details on the kickstart file format, see the + "<ulink url='&YOCTO_DOCS_DEV_URL;#openembedded-kickstart-wks-reference'>OpenEmbedded Kickstart (<filename>.wks</filename>) Reference</ulink>. + </para> + </glossdef> + </glossentry> + <glossentry id='var-WORKDIR'><glossterm>WORKDIR</glossterm> <info> WORKDIR[doc] = "The pathname of the working directory in which the OpenEmbedded build system builds a recipe. This directory is located within the TMPDIR directory structure and changes as different packages are built." diff --git a/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-customizing-standard.xml b/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-customizing-standard.xml new file mode 100644 index 000000000..f20891c80 --- /dev/null +++ b/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-customizing-standard.xml @@ -0,0 +1,58 @@ +<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" +"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" +[<!ENTITY % poky SYSTEM "../poky.ent"> %poky; ] > + +<appendix id='sdk-appendix-customizing-standard'> + +<title>Customizing the Standard SDK</title> + +<para> + This appendix presents customizations you can apply to the standard SDK. +</para> + +<section id='sdk-adding-individual-packages'> + <title>Adding Individual Packages to the Standard SDK</title> + + <para> + When you build a standard SDK using the + <filename>bitbake -c populate_sdk</filename>, a default set of + packages is included in the resulting SDK. + The + <ulink url='&YOCTO_DOCS_REF_URL;#var-TOOLCHAIN_HOST_TASK'><filename>TOOLCHAIN_HOST_TASK</filename></ulink> + and + <ulink url='&YOCTO_DOCS_REF_URL;#var-TOOLCHAIN_TARGET_TASK'><filename>TOOLCHAIN_TARGET_TASK</filename></ulink> + variables control the set of packages adding to the SDK. + </para> + + <para> + If you want to add individual packages to the toolchain that runs on + the host, simply add those packages to the + <filename>TOOLCHAIN_HOST_TASK</filename> variable. + Similarly, if you want to add packages to the default set that is + part of the toolchain that runs on the target, add the packages to the + <filename>TOOLCHAIN_TARGET_TASK</filename> variable. + </para> +</section> + +<section id='adding-api-documentation-to-the-standard-sdk'> + <title>Adding API Documentation to the Standard SDK</title> + + <para> + You can include API documentation as well as any other + documentation provided by recipes with the standard SDK by + adding "api-documentation" to the + <ulink url='&YOCTO_DOCS_REF_URL;#var-DISTRO_FEATURES'><filename>DISTRO_FEATURES</filename></ulink> + variable: + <literallayout class='monospaced'> + DISTRO_FEATURES_append = " api-documentation" + </literallayout> + Setting this variable as shown here causes the OpenEmbedded build + system to build the documentation and then include it in the standard + SDK. 
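Taken together, the two customization hooks in the new appendix above amount to a few lines of configuration. A minimal local.conf sketch, with the extra package names as illustrative assumptions (any buildable nativesdk- or target package can be listed); running bitbake <image> -c populate_sdk afterwards produces the enlarged SDK:

    TOOLCHAIN_HOST_TASK_append = " nativesdk-cmake"
    TOOLCHAIN_TARGET_TASK_append = " zlib-dev"
    DISTRO_FEATURES_append = " api-documentation"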
+ </para> +</section> + +</appendix> +<!-- +vim: expandtab tw=80 ts=4 +--> diff --git a/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-customizing.xml b/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-customizing.xml index e8a8b8cc9..965cccc2c 100644 --- a/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-customizing.xml +++ b/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-customizing.xml @@ -4,12 +4,10 @@ <appendix id='sdk-appendix-customizing'> -<title>Customizing the SDK</title> +<title>Customizing the Extensible SDK</title> <para> - This appendix presents customizations you can apply to both the standard - and extensible SDK. - Each subsection identifies the type of SDK to which the section applies. + This appendix presents customizations you can apply to the extensible SDK. </para> <section id='sdk-configuring-the-extensible-sdk'> diff --git a/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-mars.xml b/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-mars.xml index 144e0720a..521f68263 100644 --- a/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-mars.xml +++ b/import-layers/yocto-poky/documentation/sdk-manual/sdk-appendix-mars.xml @@ -72,6 +72,24 @@ <listitem><para><emphasis>Launch Eclipse:</emphasis> Double click the "Eclipse" file in the folder to launch Eclipse. + <note> + If you experience a NullPointer Exception after + launch Eclipse or the debugger from within Eclipse, + try adding the following + to your <filename>eclipse.ini</filename> file, + which is located in the directory in which you + unpacked the Eclipse tar file: + <literallayout class='monospaced'> + --launcher.GTK_version + 2 + </literallayout> + Alternatively, you can export the + <filename>SWT_GTK</filename> variable in your + shell as follows: + <literallayout class='monospaced'> + $ export SWT_GTK3=0 + </literallayout> + </note> </para></listitem> </orderedlist> </para> diff --git a/import-layers/yocto-poky/documentation/sdk-manual/sdk-manual.xml b/import-layers/yocto-poky/documentation/sdk-manual/sdk-manual.xml index 6c72a0346..c32218968 100644 --- a/import-layers/yocto-poky/documentation/sdk-manual/sdk-manual.xml +++ b/import-layers/yocto-poky/documentation/sdk-manual/sdk-manual.xml @@ -41,6 +41,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> @@ -76,6 +86,8 @@ <xi:include href="sdk-appendix-customizing.xml"/> + <xi:include href="sdk-appendix-customizing-standard.xml"/> + <xi:include href="sdk-appendix-mars.xml"/> <!-- <index id='index'> diff --git a/import-layers/yocto-poky/documentation/toaster-manual/toaster-manual.xml b/import-layers/yocto-poky/documentation/toaster-manual/toaster-manual.xml index 386c51b32..05efb1f3a 100644 --- a/import-layers/yocto-poky/documentation/toaster-manual/toaster-manual.xml +++ b/import-layers/yocto-poky/documentation/toaster-manual/toaster-manual.xml @@ -51,6 +51,16 @@ <date>October 2016</date> <revremark>Released with the Yocto Project 2.2 Release.</revremark> </revision> + <revision> + <revnumber>2.2.1</revnumber> + <date>January 2017</date> + <revremark>Released with the Yocto 
Project 2.2.1 Release.</revremark> + </revision> + <revision> + <revnumber>2.2.2</revnumber> + <date>June 2017</date> + <revremark>Released with the Yocto Project 2.2.2 Release.</revremark> + </revision> </revhistory> <copyright> diff --git a/import-layers/yocto-poky/documentation/tools/mega-manual.sed b/import-layers/yocto-poky/documentation/tools/mega-manual.sed index b6d265f44..8aea1ced8 100644 --- a/import-layers/yocto-poky/documentation/tools/mega-manual.sed +++ b/import-layers/yocto-poky/documentation/tools/mega-manual.sed @@ -2,32 +2,32 @@ # This style is for manual folders like "yocto-project-qs" and "poky-ref-manual". # This is the old way that did it. Can't do that now that we have "bitbake-user-manual" strings # in the mega-manual. -# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/[a-z]*-[a-z]*-[a-z]*\/[a-z]*-[a-z]*-[a-z]*.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/poky-ref-manual\/poky-ref-manual.html#/\"link\" href=\"#/g +# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/[a-z]*-[a-z]*-[a-z]*\/[a-z]*-[a-z]*-[a-z]*.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/poky-ref-manual\/poky-ref-manual.html#/\"link\" href=\"#/g # Processes all other manuals (<word>-<word> style) except for the BitBake User Manual because # it is not included in the mega-manual. # This style is for manual folders that use two word, which is the standard now (e.g. "ref-manual"). # This was the one-liner that worked before we introduced the BitBake User Manual, which is # not in the mega-manual. 
-# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/[a-z]*-[a-z]*\/[a-z]*-[a-z]*.html#/\"link\" href=\"#/g +# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/[a-z]*-[a-z]*\/[a-z]*-[a-z]*.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/sdk-manual\/sdk-manual.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/bsp-guide\/bsp-guide.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/dev-manual\/dev-manual.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/kernel-dev\/kernel-dev.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/profile-manual\/profile-manual.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/ref-manual\/ref-manual.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/toaster-manual\/toaster-manual.html#/\"link\" href=\"#/g -s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/sdk-manual\/sdk-manual.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/bsp-guide\/bsp-guide.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/dev-manual\/dev-manual.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/kernel-dev\/kernel-dev.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/profile-manual\/profile-manual.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/ref-manual\/ref-manual.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/toaster-manual\/toaster-manual.html#/\"link\" href=\"#/g +s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g # Process cases where just an external manual is referenced without an id anchor -s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/yocto-project-qs\/yocto-project-qs.html\" target=\"_top\">Yocto Project Quick Start<\/a>/Yocto Project Quick Start/g -s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/dev-manual\/dev-manual.html\" target=\"_top\">Yocto Project Development Manual<\/a>/Yocto Project Development Manual/g -s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/sdk-manual\/sdk-manual.html\" target=\"_top\">Yocto Project Software Development Kit (SDK) Developer's Guide<\/a>/Yocto Project Software Development Kit (SDK) Developer's Guide/g -s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/bsp-guide\/bsp-guide.html\" target=\"_top\">Yocto Project Board Support Package (BSP) Developer's Guide<\/a>/Yocto Project Board Support Package (BSP) Developer's Guide/g -s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/profile-manual\/profile-manual.html\" target=\"_top\">Yocto Project Profiling and Tracing Manual<\/a>/Yocto Project Profiling and Tracing Manual/g -s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/kernel-dev\/kernel-dev.html\" target=\"_top\">Yocto Project Linux Kernel Development Manual<\/a>/Yocto Project Linux Kernel Development Manual/g -s/<a class=\"ulink\" 
href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/ref-manual\/ref-manual.html\" target=\"_top\">Yocto Project Reference Manual<\/a>/Yocto Project Reference Manual/g -s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2\/toaster-manual\/toaster-manual.html\" target=\"_top\">Toaster User Manual<\/a>/Toaster User Manual/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/yocto-project-qs\/yocto-project-qs.html\" target=\"_top\">Yocto Project Quick Start<\/a>/Yocto Project Quick Start/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/dev-manual\/dev-manual.html\" target=\"_top\">Yocto Project Development Manual<\/a>/Yocto Project Development Manual/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/sdk-manual\/sdk-manual.html\" target=\"_top\">Yocto Project Software Development Kit (SDK) Developer's Guide<\/a>/Yocto Project Software Development Kit (SDK) Developer's Guide/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/bsp-guide\/bsp-guide.html\" target=\"_top\">Yocto Project Board Support Package (BSP) Developer's Guide<\/a>/Yocto Project Board Support Package (BSP) Developer's Guide/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/profile-manual\/profile-manual.html\" target=\"_top\">Yocto Project Profiling and Tracing Manual<\/a>/Yocto Project Profiling and Tracing Manual/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/kernel-dev\/kernel-dev.html\" target=\"_top\">Yocto Project Linux Kernel Development Manual<\/a>/Yocto Project Linux Kernel Development Manual/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/ref-manual\/ref-manual.html\" target=\"_top\">Yocto Project Reference Manual<\/a>/Yocto Project Reference Manual/g +s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.2.2\/toaster-manual\/toaster-manual.html\" target=\"_top\">Toaster User Manual<\/a>/Toaster User Manual/g diff --git a/import-layers/yocto-poky/documentation/yocto-project-qs/yocto-project-qs.xml b/import-layers/yocto-poky/documentation/yocto-project-qs/yocto-project-qs.xml index d18f0aecd..950a4ff8b 100644 --- a/import-layers/yocto-poky/documentation/yocto-project-qs/yocto-project-qs.xml +++ b/import-layers/yocto-poky/documentation/yocto-project-qs/yocto-project-qs.xml @@ -302,7 +302,8 @@ <itemizedlist> <listitem><para><emphasis>Ubuntu and Debian</emphasis> <literallayout class='monospaced'> - $ sudo apt-get install &UBUNTU_HOST_PACKAGES_ESSENTIAL; libsdl1.2-dev xterm + $ sudo apt-get install &UBUNTU_HOST_PACKAGES_ESSENTIAL; \ + libsdl1.2-dev xterm </literallayout> </para></listitem> <listitem><para><emphasis>Fedora</emphasis> @@ -312,12 +313,14 @@ </para></listitem> <listitem><para><emphasis>OpenSUSE</emphasis> <literallayout class='monospaced'> - $ sudo zypper install &OPENSUSE_HOST_PACKAGES_ESSENTIAL; libSDL-devel xterm + $ sudo zypper install &OPENSUSE_HOST_PACKAGES_ESSENTIAL; \ + libSDL-devel xterm </literallayout> </para></listitem> <listitem><para><emphasis>CentOS</emphasis> <literallayout class='monospaced'> - $ sudo yum install &CENTOS_HOST_PACKAGES_ESSENTIAL; SDL-devel xterm + $ sudo yum install &CENTOS_HOST_PACKAGES_ESSENTIAL; \ + SDL-devel xterm </literallayout> <note> CentOS 6.x users need to ensure that the required @@ -727,7 +730,7 @@ Once the build completes, the resulting console-only image is located in the Build Directory here: <literallayout class='monospaced'> - 
tmp/deploy/images/intel-corei7-64/core-image-base-intel-corei7-64.hddimg + tmp/deploy/images/intel-corei7-64/core-image-base-intel-corei7-64.wic </literallayout> </para></listitem> <listitem><para><emphasis>Write the Image:</emphasis> @@ -735,14 +738,14 @@ (e.g. a USB key, SATA drive, SD card, etc.) using the <filename>dd</filename> utility: <literallayout class='monospaced'> - $ sudo dd if=tmp/deploy/images/intel-corei7-64/core-image-minimal-intel-corei7-64.wic of=TARGET_DEVICE + $ sudo dd if=tmp/deploy/images/intel-corei7-64/core-image-base-intel-corei7-64.wic of=TARGET_DEVICE </literallayout> In the previous command, the <filename>TARGET_DEVICE</filename> is the device node in the host machine (e.g. <filename>/dev/sdc</filename>, which is most likely a USB stick, or <filename>/dev/mmcblk0</filename>, which is most likely an - SD card. + SD card). </para></listitem> <listitem><para><emphasis>Boot the Hardware:</emphasis> With the boot device provisioned, you can insert the diff --git a/import-layers/yocto-poky/meta-poky/conf/distro/poky.conf b/import-layers/yocto-poky/meta-poky/conf/distro/poky.conf index ddf030a86..aca38c3d6 100644 --- a/import-layers/yocto-poky/meta-poky/conf/distro/poky.conf +++ b/import-layers/yocto-poky/meta-poky/conf/distro/poky.conf @@ -1,6 +1,6 @@ DISTRO = "poky" DISTRO_NAME = "Poky (Yocto Project Reference Distro)" -DISTRO_VERSION = "2.2" +DISTRO_VERSION = "2.2.2" DISTRO_CODENAME = "morty" SDK_VENDOR = "-pokysdk" SDK_VERSION := "${@'${DISTRO_VERSION}'.replace('snapshot-${DATE}','snapshot')}" diff --git a/import-layers/yocto-poky/meta-yocto-bsp/lib/oeqa/controllers/edgeroutertarget.py b/import-layers/yocto-poky/meta-yocto-bsp/lib/oeqa/controllers/edgeroutertarget.py index b3338ca85..9c47b5b04 100644 --- a/import-layers/yocto-poky/meta-yocto-bsp/lib/oeqa/controllers/edgeroutertarget.py +++ b/import-layers/yocto-poky/meta-yocto-bsp/lib/oeqa/controllers/edgeroutertarget.py @@ -44,7 +44,7 @@ class EdgeRouterTarget(MasterImageHardwareTarget): def __init__(self, d): super(EdgeRouterTarget, self).__init__(d) - self.image_fstype = self.get_image_fstype(d) + self.image_fstype = self.get_image_fstype(d) self.deploy_cmds = [ 'mount -L boot /boot', 'mkdir -p /mnt/testrootfs', diff --git a/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.1.bbappend index 74b9c7c16..68bae2c4e 100644 --- a/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.1.bbappend +++ b/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -7,11 +7,11 @@ KBRANCH_mpc8315e-rdb = "standard/fsl-mpc8315e-rdb" KMACHINE_genericx86 ?= "common-pc" KMACHINE_genericx86-64 ?= "common-pc-64" -SRCREV_machine_genericx86 ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_machine_genericx86-64 ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_machine_edgerouter ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_machine_beaglebone ?= "12532e753b50997690923e03edb3ac3368817a26" -SRCREV_machine_mpc8315e-rdb ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" +SRCREV_machine_genericx86 ?= "fec49247816d7045aa8abe0047bcd4737af9a853" +SRCREV_machine_genericx86-64 ?= "fec49247816d7045aa8abe0047bcd4737af9a853" +SRCREV_machine_edgerouter ?= "fec49247816d7045aa8abe0047bcd4737af9a853" +SRCREV_machine_beaglebone ?= "938cc4ac8d36f166c9e2e0517d6ffd6d278fe631" +SRCREV_machine_mpc8315e-rdb ?= "fec49247816d7045aa8abe0047bcd4737af9a853" COMPATIBLE_MACHINE_genericx86 = 
"genericx86" COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" @@ -19,8 +19,8 @@ COMPATIBLE_MACHINE_edgerouter = "edgerouter" COMPATIBLE_MACHINE_beaglebone = "beaglebone" COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb" -LINUX_VERSION_genericx86 = "4.1.33" -LINUX_VERSION_genericx86-64 = "4.1.33" -LINUX_VERSION_edgerouter = "4.1.33" -LINUX_VERSION_beaglebone = "4.1.33" -LINUX_VERSION_mpc8315e-rdb = "4.1.33" +LINUX_VERSION_genericx86 = "4.1.36" +LINUX_VERSION_genericx86-64 = "4.1.36" +LINUX_VERSION_edgerouter = "4.1.36" +LINUX_VERSION_beaglebone = "4.1.36" +LINUX_VERSION_mpc8315e-rdb = "4.1.36" diff --git a/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.4.bbappend index 3f0c4dac1..a0efb152b 100644 --- a/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -7,11 +7,11 @@ KBRANCH_edgerouter = "standard/edgerouter" KBRANCH_beaglebone = "standard/beaglebone" KBRANCH_mpc8315e-rdb = "standard/fsl-mpc8315e-rdb" -SRCREV_machine_genericx86 ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_genericx86-64 ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_edgerouter ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_beaglebone ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_mpc8315e-rdb ?= "7fa42ad9a43ca4bb1e578e208ffeddae2d6150e2" +SRCREV_machine_genericx86 ?= "35482df5d5ba0807eb8a7c40b554bd657e3f9987" +SRCREV_machine_genericx86-64 ?= "35482df5d5ba0807eb8a7c40b554bd657e3f9987" +SRCREV_machine_edgerouter ?= "35482df5d5ba0807eb8a7c40b554bd657e3f9987" +SRCREV_machine_beaglebone ?= "35482df5d5ba0807eb8a7c40b554bd657e3f9987" +SRCREV_machine_mpc8315e-rdb ?= "772f071dbdd4b813c921058ddf9cba207237228b" COMPATIBLE_MACHINE_genericx86 = "genericx86" COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" @@ -19,8 +19,8 @@ COMPATIBLE_MACHINE_edgerouter = "edgerouter" COMPATIBLE_MACHINE_beaglebone = "beaglebone" COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb" -LINUX_VERSION_genericx86 = "4.4.26" -LINUX_VERSION_genericx86-64 = "4.4.26" -LINUX_VERSION_edgerouter = "4.4.26" -LINUX_VERSION_beaglebone = "4.4.26" -LINUX_VERSION_mpc8315e-rdb = "4.4.26" +LINUX_VERSION_genericx86 = "4.4.36" +LINUX_VERSION_genericx86-64 = "4.4.36" +LINUX_VERSION_edgerouter = "4.4.36" +LINUX_VERSION_beaglebone = "4.4.36" +LINUX_VERSION_mpc8315e-rdb = "4.4.36" diff --git a/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.8.bbappend index 14fa38ba1..761e6e0a3 100644 --- a/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -7,11 +7,11 @@ KBRANCH_edgerouter = "standard/edgerouter" KBRANCH_beaglebone = "standard/beaglebone" KBRANCH_mpc8315e-rdb = "standard/fsl-mpc8315e-rdb" -SRCREV_machine_genericx86 ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_genericx86-64 ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_edgerouter ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_beaglebone ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_mpc8315e-rdb ?= "4be88b03f6648004e74b68044fa2b05e81cf9a1b" +SRCREV_machine_genericx86 ?= "021b4aef55b44597587a1ce5879be642b3dca155" 
+SRCREV_machine_genericx86-64 ?= "021b4aef55b44597587a1ce5879be642b3dca155" +SRCREV_machine_edgerouter ?= "6076f16536329465b62bd2037b8582a5e18f85d1" +SRCREV_machine_beaglebone ?= "85dc85153cd7e3b72d34f967c4c0edde590c79a8" +SRCREV_machine_mpc8315e-rdb ?= "f73222eb3bbd07a45564397a88dec554e848da7d" COMPATIBLE_MACHINE_genericx86 = "genericx86" COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" @@ -19,8 +19,8 @@ COMPATIBLE_MACHINE_edgerouter = "edgerouter" COMPATIBLE_MACHINE_beaglebone = "beaglebone" COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb" -LINUX_VERSION_genericx86 = "4.8.3" -LINUX_VERSION_genericx86-64 = "4.8.3" -LINUX_VERSION_edgerouter = "4.8.3" -LINUX_VERSION_beaglebone = "4.8.3" -LINUX_VERSION_mpc8315e-rdb = "4.8.3" +LINUX_VERSION_genericx86 = "4.8.12" +LINUX_VERSION_genericx86-64 = "4.8.12" +LINUX_VERSION_edgerouter = "4.8.12" +LINUX_VERSION_beaglebone = "4.8.12" +LINUX_VERSION_mpc8315e-rdb = "4.8.12" diff --git a/import-layers/yocto-poky/meta/classes/archiver.bbclass b/import-layers/yocto-poky/meta/classes/archiver.bbclass index 9239983e8..188f8c042 100644 --- a/import-layers/yocto-poky/meta/classes/archiver.bbclass +++ b/import-layers/yocto-poky/meta/classes/archiver.bbclass @@ -125,7 +125,7 @@ python () { # (e.g. git repositories) is "unpacked" and then put into a tarball. python do_ar_original() { - import shutil, tarfile, tempfile + import shutil, tempfile if d.getVarFlag('ARCHIVER_MODE', 'src', True) != "original": return @@ -261,13 +261,9 @@ def create_tarball(d, srcdir, suffix, ar_outdir): filename = '%s.tar.gz' % d.getVar('PF', True) tarname = os.path.join(ar_outdir, filename) - srcdir = srcdir.rstrip('/') - dirname = os.path.dirname(srcdir) - basename = os.path.basename(srcdir) - os.chdir(dirname) bb.note('Creating %s' % tarname) tar = tarfile.open(tarname, 'w:gz') - tar.add(basename) + tar.add(srcdir, arcname=os.path.basename(srcdir)) tar.close() # creating .diff.gz between source.orig and source @@ -353,8 +349,8 @@ python do_ar_recipe () { bbappend_files = d.getVar('BBINCLUDED', True).split() # If recipe name is aa, we need to match files like aa.bbappend and aa_1.1.bbappend # Files like aa1.bbappend or aa1_1.1.bbappend must be excluded. 
- bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" %pn) - bbappend_re1 = re.compile( r".*/%s\.bbappend$" %pn) + bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" % re.escape(pn)) + bbappend_re1 = re.compile( r".*/%s\.bbappend$" % re.escape(pn)) for file in bbappend_files: if bbappend_re.match(file) or bbappend_re1.match(file): shutil.copy(file, outdir) diff --git a/import-layers/yocto-poky/meta/classes/cmake.bbclass b/import-layers/yocto-poky/meta/classes/cmake.bbclass index 3e762de6a..fad0baa51 100644 --- a/import-layers/yocto-poky/meta/classes/cmake.bbclass +++ b/import-layers/yocto-poky/meta/classes/cmake.bbclass @@ -19,6 +19,8 @@ OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG" OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG" OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}" OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}" +CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}" +CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}" OECMAKE_RPATH ?= "" OECMAKE_PERLNATIVE_DIR ??= "" diff --git a/import-layers/yocto-poky/meta/classes/cve-check.bbclass b/import-layers/yocto-poky/meta/classes/cve-check.bbclass index 1425a4055..75b8fa9ab 100644 --- a/import-layers/yocto-poky/meta/classes/cve-check.bbclass +++ b/import-layers/yocto-poky/meta/classes/cve-check.bbclass @@ -20,6 +20,10 @@ # the only method to check against CVEs. Running this tool # doesn't guarantee your packages are free of CVEs. +# The product name that the CVE database uses. Defaults to BPN, but may need to +# be overriden per recipe (for example tiff.bb sets CVE_PRODUCT=libtiff). +CVE_PRODUCT ?= "${BPN}" + CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK" CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvd.db" @@ -39,7 +43,7 @@ CVE_CHECK_PN_WHITELIST = "\ # Whitelist for CVE and version of package CVE_CHECK_CVE_WHITELIST = "{\ - 'CVE-2014-2524': ('6.3',), \ + 'CVE-2014-2524': ('6.3','5.2',), \ }" python do_cve_check () { @@ -144,7 +148,7 @@ def check_cves(d, patched_cves): cves_patched = [] cves_unpatched = [] - bpn = d.getVar("BPN", True) + bpn = d.getVar("CVE_PRODUCT") pv = d.getVar("PV", True).split("git+")[0] cves = " ".join(patched_cves) cve_db_dir = d.getVar("CVE_CHECK_DB_DIR", True) diff --git a/import-layers/yocto-poky/meta/classes/icecc.bbclass b/import-layers/yocto-poky/meta/classes/icecc.bbclass index a83789415..c57257151 100644 --- a/import-layers/yocto-poky/meta/classes/icecc.bbclass +++ b/import-layers/yocto-poky/meta/classes/icecc.bbclass @@ -42,6 +42,7 @@ def icecc_dep_prepend(d): DEPENDS_prepend += "${@icecc_dep_prepend(d)} " +get_cross_kernel_cc[vardepsexclude] += "KERNEL_CC" def get_cross_kernel_cc(bb,d): kernel_cc = d.getVar('KERNEL_CC', False) diff --git a/import-layers/yocto-poky/meta/classes/image.bbclass b/import-layers/yocto-poky/meta/classes/image.bbclass index 9f5869e36..a9ab2fac1 100644 --- a/import-layers/yocto-poky/meta/classes/image.bbclass +++ b/import-layers/yocto-poky/meta/classes/image.bbclass @@ -457,7 +457,7 @@ python () { rm_tmp_images = set() def gen_conversion_cmds(bt): for ctype in sorted(ctypes): - if bt[bt.find('.') + 1:] == ctype: + if bt.endswith("." + ctype): type = bt[0:-len(ctype) - 1] if type.startswith("debugfs_"): type = type[8:] @@ -487,7 +487,7 @@ python () { # Clean up after applying all conversion commands. Some of them might # use the same input, therefore we cannot delete sooner without applying # some complex dependency analysis. 
- for image in rm_tmp_images: + for image in sorted(rm_tmp_images): cmds.append("\trm " + image) after = 'do_image' diff --git a/import-layers/yocto-poky/meta/classes/image_types.bbclass b/import-layers/yocto-poky/meta/classes/image_types.bbclass index 1ce8334e3..3bfa60ba2 100644 --- a/import-layers/yocto-poky/meta/classes/image_types.bbclass +++ b/import-layers/yocto-poky/meta/classes/image_types.bbclass @@ -17,17 +17,25 @@ def imagetypes_getdepends(d): d += ":do_populate_sysroot" deps.add(d) + # Take a type in the form of foo.bar.car and split it into the items + # needed for the image deps "foo", and the conversion deps ["bar", "car"] + def split_types(typestring): + types = typestring.split(".") + return types[0], types[1:] + fstypes = set((d.getVar('IMAGE_FSTYPES', True) or "").split()) fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS', True) or "").split()) deps = set() for typestring in fstypes: - types = typestring.split(".") - basetype, resttypes = types[0], types[1:] - + basetype, resttypes = split_types(typestring) adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps) + for typedepends in (d.getVar("IMAGE_TYPEDEP_%s" % basetype, True) or "").split(): - adddep(d.getVar('IMAGE_DEPENDS_%s' % typedepends, True) , deps) + base, rest = split_types(typedepends) + adddep(d.getVar('IMAGE_DEPENDS_%s' % base, True) , deps) + resttypes += rest + for ctype in resttypes: adddep(d.getVar("CONVERSION_DEPENDS_%s" % ctype, True), deps) adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps) diff --git a/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass b/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass index 6c8c1ff60..933fa4d9c 100644 --- a/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass +++ b/import-layers/yocto-poky/meta/classes/image_types_uboot.bbclass @@ -3,9 +3,6 @@ inherit image_types kernel-arch oe_mkimage () { mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \ -d ${IMGDEPLOYDIR}/$1 ${IMGDEPLOYDIR}/$1.u-boot - if [ x$3 = x"clean" ]; then - rm $1 - fi } CONVERSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot" @@ -14,13 +11,13 @@ CONVERSION_DEPENDS_u-boot = "u-boot-mkimage-native" CONVERSION_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none" CONVERSION_DEPENDS_gz.u-boot = "u-boot-mkimage-native" -CONVERSION_CMD_gz.u-boot = "${CONVERSION_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip clean" +CONVERSION_CMD_gz.u-boot = "${CONVERSION_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip" CONVERSION_DEPENDS_bz2.u-boot = "u-boot-mkimage-native" -CONVERSION_CMD_bz2.u-boot = "${CONVERSION_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2 clean" +CONVERSION_CMD_bz2.u-boot = "${CONVERSION_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2" CONVERSION_DEPENDS_lzma.u-boot = "u-boot-mkimage-native" -CONVERSION_CMD_lzma.u-boot = "${CONVERSION_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma clean" +CONVERSION_CMD_lzma.u-boot = "${CONVERSION_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma" IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot cpio.gz.u-boot" diff --git a/import-layers/yocto-poky/meta/classes/insane.bbclass b/import-layers/yocto-poky/meta/classes/insane.bbclass index 1d7377825..7bbe8b63a 100644 --- a/import-layers/yocto-poky/meta/classes/insane.bbclass +++ b/import-layers/yocto-poky/meta/classes/insane.bbclass @@ -63,6 +63,8 @@ def package_qa_get_machine_dict(d): "arm" : (40, 0, 0, True, 32), }, 
"elf" : { + "aarch64" : (183, 0, 0, True, 64), + "aarch64_be" :(183, 0, 0, False, 64), "i586" : (3, 0, 0, True, 32), "x86_64": (62, 0, 0, True, 64), "epiphany": (4643, 0, 0, True, 32), @@ -403,47 +405,6 @@ def package_qa_check_perm(path,name,d, elf, messages): """ return -QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries" -def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages): - """ - Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix - """ - if unsafe_references_skippable(path, name, d): - return - - if elf: - import subprocess as sub - pn = d.getVar('PN', True) - - exec_prefix = d.getVar('exec_prefix', True) - sysroot_path = d.getVar('STAGING_DIR_TARGET', True) - sysroot_path_usr = sysroot_path + exec_prefix - - try: - ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read().decode("utf-8") - except bb.process.CmdError: - error_msg = pn + ": prelink-rtld aborted when processing %s" % path - package_qa_handle_error("unsafe-references-in-binaries", error_msg, d) - return False - - if sysroot_path_usr in ldd_output: - ldd_output = ldd_output.replace(sysroot_path, "") - - pkgdest = d.getVar('PKGDEST', True) - packages = d.getVar('PACKAGES', True) - - for package in packages.split(): - short_path = path.replace('%s/%s' % (pkgdest, package), "", 1) - if (short_path != path): - break - - base_err = pn + ": %s, installed in the base_prefix, requires a shared library under exec_prefix (%s)" % (short_path, exec_prefix) - for line in ldd_output.split('\n'): - if exec_prefix in line: - error_msg = "%s: %s" % (base_err, line.strip()) - package_qa_handle_error("unsafe-references-in-binaries", error_msg, d) - - return False QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts" def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages): @@ -647,8 +608,8 @@ def package_qa_check_buildpaths(path, name, d, elf, messages): return tmpdir = d.getVar('TMPDIR', True) - with open(path) as f: - file_content = f.read() + with open(path, 'rb') as f: + file_content = f.read().decode('utf-8', errors='ignore') if tmpdir in file_content: package_qa_add_message(messages, "buildpaths", "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d)) @@ -820,6 +781,23 @@ def package_qa_check_staged(path,d): return sane +# Run all package-wide warnfuncs and errorfuncs +def package_qa_package(warnfuncs, errorfuncs, skip, package, d): + warnings = {} + errors = {} + + for func in warnfuncs: + func(package, d, warnings) + for func in errorfuncs: + func(package, d, errors) + + for w in warnings: + package_qa_handle_error(w, warnings[w], d) + for e in errors: + package_qa_handle_error(e, errors[e], d) + + return len(errors) == 0 + # Walk over all files in a directory and call func def package_qa_walk(warnfuncs, errorfuncs, skip, package, d): import oe.qa @@ -855,7 +833,7 @@ def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d): if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg: localdata = bb.data.createCopy(d) - localdata.setVar('OVERRIDES', pkg) + localdata.setVar('OVERRIDES', localdata.getVar('OVERRIDES', True) + ':' + pkg) bb.data.update_data(localdata) # Now check the RDEPENDS @@ -983,31 +961,24 @@ def package_qa_check_deps(pkg, pkgdest, skip, d): check_valid_deps('RREPLACES') check_valid_deps('RCONFLICTS') -QAPATHTEST[expanded-d] = 
"package_qa_check_expanded_d" -def package_qa_check_expanded_d(path,name,d,elf,messages): +QAPKGTEST[expanded-d] = "package_qa_check_expanded_d" +def package_qa_check_expanded_d(package, d, messages): """ Check for the expanded D (${D}) value in pkg_* and FILES variables, warn the user to use it correctly. """ - sane = True - expanded_d = d.getVar('D',True) - - # Get packages for current recipe and iterate - packages = d.getVar('PACKAGES', True).split(" ") - for pak in packages: - # Go through all variables and check if expanded D is found, warn the user accordingly - for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm': - bbvar = d.getVar(var + "_" + pak, False) - if bbvar: - # Bitbake expands ${D} within bbvar during the previous step, so we check for its expanded value - if expanded_d in bbvar: - if var == 'FILES': - package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % pak) - sane = False - else: - package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, pak)) - sane = False + expanded_d = d.getVar('D', True) + + for var in 'FILES','pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm': + bbvar = d.getVar(var + "_" + package, True) or "" + if expanded_d in bbvar: + if var == 'FILES': + package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % package) + sane = False + else: + package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, package)) + sane = False return sane def package_qa_check_encoding(keys, encode, d): @@ -1115,7 +1086,6 @@ python do_package_qa () { if not packages: return - testmatrix = d.getVarFlags("QAPATHTEST") import re # The package name matches the [a-z0-9.+-]+ regular expression pkgname_pattern = re.compile("^[a-z0-9.+-]+$") @@ -1125,28 +1095,33 @@ python do_package_qa () { for dep in taskdepdata: taskdeps.add(taskdepdata[dep][0]) - g = globals() for package in packages: + def parse_test_matrix(matrix_name): + testmatrix = d.getVarFlags(matrix_name) or {} + g = globals() + warnchecks = [] + for w in (d.getVar("WARN_QA", True) or "").split(): + if w in skip: + continue + if w in testmatrix and testmatrix[w] in g: + warnchecks.append(g[testmatrix[w]]) + if w == 'unsafe-references-in-binaries': + oe.utils.write_ld_so_conf(d) + + errorchecks = [] + for e in (d.getVar("ERROR_QA", True) or "").split(): + if e in skip: + continue + if e in testmatrix and testmatrix[e] in g: + errorchecks.append(g[testmatrix[e]]) + if e == 'unsafe-references-in-binaries': + oe.utils.write_ld_so_conf(d) + return warnchecks, errorchecks + skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split() if skip: bb.note("Package %s skipping QA tests: %s" % (package, str(skip))) - warnchecks = [] - for w in (d.getVar("WARN_QA", True) or "").split(): - if w in skip: - continue - if w in testmatrix and testmatrix[w] in g: - warnchecks.append(g[testmatrix[w]]) - if w == 'unsafe-references-in-binaries': - oe.utils.write_ld_so_conf(d) - - errorchecks = [] - for e in (d.getVar("ERROR_QA", True) or "").split(): - if e in skip: - continue - if e in testmatrix and testmatrix[e] in g: - 
errorchecks.append(g[testmatrix[e]]) - if e == 'unsafe-references-in-binaries': - oe.utils.write_ld_so_conf(d) + bb.note("Checking Package: %s" % package) # Check package name @@ -1154,8 +1129,11 @@ python do_package_qa () { package_qa_handle_error("pkgname", "%s doesn't match the [a-z0-9.+-]+ regex" % package, d) - path = "%s/%s" % (pkgdest, package) - package_qa_walk(warnchecks, errorchecks, skip, package, d) + warn_checks, error_checks = parse_test_matrix("QAPATHTEST") + package_qa_walk(warn_checks, error_checks, skip, package, d) + + warn_checks, error_checks = parse_test_matrix("QAPKGTEST") + package_qa_package(warn_checks, error_checks, skip, package, d) package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d) package_qa_check_deps(package, pkgdest, skip, d) diff --git a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass index 6160a29ec..a60327a07 100644 --- a/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass +++ b/import-layers/yocto-poky/meta/classes/kernel-yocto.bbclass @@ -149,12 +149,18 @@ do_kernel_metadata() { elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`" if [ -n "${elements}" ]; then scc --force -o ${S}/${meta_dir}:cfg,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES} + if [ $? -ne 0 ]; then + bbfatal_log "Could not generate configuration queue for ${KMACHINE}." + fi fi # run2: only generate patches for elements that have been passed on the SRC_URI elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`" if [ -n "${elements}" ]; then scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES} + if [ $? -ne 0 ]; then + bbfatal_log "Could not generate configuration queue for ${KMACHINE}." + fi fi } diff --git a/import-layers/yocto-poky/meta/classes/kernel.bbclass b/import-layers/yocto-poky/meta/classes/kernel.bbclass index 25a153cd2..eefe574a6 100644 --- a/import-layers/yocto-poky/meta/classes/kernel.bbclass +++ b/import-layers/yocto-poky/meta/classes/kernel.bbclass @@ -43,12 +43,12 @@ python __anonymous () { typeformake = re.sub(r'\.gz', '', types) d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake) - for type in typeformake.split(): + for type in types.split(): typelower = type.lower() d.appendVar('PACKAGES', ' ' + 'kernel-image-' + typelower) - d.setVar('FILES_kernel-image-' + typelower, '/boot/' + type + '*') + d.setVar('FILES_kernel-image-' + typelower, '/boot/' + type + '-${KERNEL_VERSION_NAME}') d.appendVar('RDEPENDS_kernel-image', ' ' + 'kernel-image-' + typelower) @@ -165,7 +165,7 @@ copy_initramfs() { mkdir -p ${B}/usr # Find and use the first initramfs image archive type we find rm -f ${B}/usr/${INITRAMFS_IMAGE}-${MACHINE}.cpio - for img in cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do + for img in cpio cpio.gz cpio.lz4 cpio.lzo cpio.lzma cpio.xz; do if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then cp ${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img ${B}/usr/. 
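In the do_package_qa() rework above, parse_test_matrix() turns a varflag table (QAPATHTEST for per-file checks, QAPKGTEST for the new per-package checks) into lists of callables, filtered by WARN_QA/ERROR_QA and INSANE_SKIP_<pkg>. A simplified sketch of that lookup, with a plain dict standing in for d.getVarFlags() and a dummy check function:

    testmatrix = {"expanded-d": "package_qa_check_expanded_d"}   # e.g. the QAPKGTEST flags

    def package_qa_check_expanded_d(package, d, messages):       # dummy stand-in
        return True

    def parse_test_matrix(matrix, qa_names, skip):
        g = globals()
        return [g[matrix[name]] for name in qa_names
                if name not in skip and name in matrix and matrix[name] in g]

    warnchecks = parse_test_matrix(testmatrix, ["expanded-d", "buildpaths"], skip=[])
    # -> [package_qa_check_expanded_d]; "buildpaths" is skipped here because it
    # is a QAPATHTEST (per-file) check and so lives in the other matrix.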
case $img in @@ -235,12 +235,6 @@ do_bundle_initramfs () { mv -f ${KERNEL_OUTPUT_DIR}/$type.bak ${KERNEL_OUTPUT_DIR}/$type fi done - # Update install area - for type in ${KERNEL_IMAGETYPES} ; do - echo "There is kernel image bundled with initramfs: ${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs" - install -m 0644 ${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs ${D}/boot/$type-initramfs-${MACHINE}.bin - echo "${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs" - done fi } do_bundle_initramfs[dirs] = "${B}" @@ -270,6 +264,7 @@ kernel_do_compile() { oe_runmake ${typeformake} CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} $use_alternate_initrd for type in ${KERNEL_IMAGETYPES} ; do if test "${typeformake}.gz" = "${type}"; then + mkdir -p "${KERNEL_OUTPUT_DIR}" gzip -9c < "${typeformake}" > "${KERNEL_OUTPUT_DIR}/${type}" break; fi @@ -486,7 +481,7 @@ FILES_${PN} = "" FILES_kernel-base = "/lib/modules/${KERNEL_VERSION}/modules.order /lib/modules/${KERNEL_VERSION}/modules.builtin" FILES_kernel-image = "" FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config* ${KERNEL_SRC_PATH} /lib/modules/${KERNEL_VERSION}/build" -FILES_kernel-vmlinux = "/boot/vmlinux*" +FILES_kernel-vmlinux = "/boot/vmlinux-${KERNEL_VERSION_NAME}" FILES_kernel-modules = "" RDEPENDS_kernel = "kernel-base" # Allow machines to override this dependency if kernel image files are @@ -612,8 +607,6 @@ kernel_do_deploy() { ln -sf ${base_name}.bin ${DEPLOYDIR}/${type} done - cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt - cd ${B} # Update deploy directory for type in ${KERNEL_IMAGETYPES} ; do diff --git a/import-layers/yocto-poky/meta/classes/license.bbclass b/import-layers/yocto-poky/meta/classes/license.bbclass index da4fc3e1d..721343d0f 100644 --- a/import-layers/yocto-poky/meta/classes/license.bbclass +++ b/import-layers/yocto-poky/meta/classes/license.bbclass @@ -279,7 +279,7 @@ def get_deployed_files(man_file): """ dep_files = [] - excluded_files = ["README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt"] + excluded_files = [] with open(man_file, "r") as manifest: all_files = manifest.read() for f in all_files.splitlines(): @@ -351,6 +351,8 @@ def copy_license_files(lic_files_paths, destdir): dst = os.path.join(destdir, basename) if os.path.exists(dst): os.remove(dst) + if os.path.islink(src): + src = os.path.realpath(src) canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev) if canlink: try: diff --git a/import-layers/yocto-poky/meta/classes/mirrors.bbclass b/import-layers/yocto-poky/meta/classes/mirrors.bbclass index 11847085b..2cdc71b6e 100644 --- a/import-layers/yocto-poky/meta/classes/mirrors.bbclass +++ b/import-layers/yocto-poky/meta/classes/mirrors.bbclass @@ -2,24 +2,24 @@ MIRRORS += "\ ${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool \n \ ${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20110127T084257Z/debian/pool \n \ ${DEBIAN_MIRROR} http://snapshot.debian.org/archive/debian-archive/20090802T004153Z/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} 
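The license.bbclass change above resolves symlinked license files before the existing hardlink-or-copy decision, so the link/stat calls operate on the real file. A self-contained sketch of that decision as copy_license_files() makes it (paths are illustrative, and the surrounding manifest handling is omitted):

    import os, shutil

    def install_license(src, destdir):
        dst = os.path.join(destdir, os.path.basename(src))
        if os.path.exists(dst):
            os.remove(dst)
        if os.path.islink(src):
            src = os.path.realpath(src)     # the new line: follow the symlink first
        # hardlink only if the source is writable and on the same filesystem
        canlink = os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev)
        if canlink:
            try:
                os.link(src, dst)
                return
            except OSError:
                pass                        # fall through to a plain copy
        shutil.copyfile(src, dst)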
ftp://ftp.ie.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \ -${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \ -${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \ +${DEBIAN_MIRROR} http://ftp.de.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.au.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.cl.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.hr.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.fi.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.hk.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.hu.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.ie.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.it.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.jp.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.no.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.pl.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.ro.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.si.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.es.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.se.debian.org/debian/pool \n \ +${DEBIAN_MIRROR} http://ftp.tr.debian.org/debian/pool \n \ +${GNU_MIRROR} https://mirrors.kernel.org/gnu \n \ ${KERNELORG_MIRROR} http://www.kernel.org/pub \n \ ${GNUPG_MIRROR} ftp://ftp.gnupg.org/gcrypt \n \ ${GNUPG_MIRROR} ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt \n \ diff --git a/import-layers/yocto-poky/meta/classes/nativesdk.bbclass b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass index a78257c19..31dde4a90 100644 --- a/import-layers/yocto-poky/meta/classes/nativesdk.bbclass +++ b/import-layers/yocto-poky/meta/classes/nativesdk.bbclass @@ -97,3 +97,5 @@ do_populate_sysroot[stamp-extra-info] = "" do_packagedata[stamp-extra-info] = "" USE_NLS = "${SDKUSE_NLS}" + +OLDEST_KERNEL = "${SDK_OLDEST_KERNEL}" diff --git a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass index eb0093233..e7e7d4929 100644 --- a/import-layers/yocto-poky/meta/classes/package_ipk.bbclass +++ b/import-layers/yocto-poky/meta/classes/package_ipk.bbclass @@ -48,6 +48,8 @@ python do_package_ipk () { if os.path.exists(p): bb.utils.prunedir(p) + recipesource = os.path.basename(d.getVar('FILE', True)) + for pkg in packages.split(): localdata = bb.data.createCopy(d) root = "%s/%s" % (pkgdest, pkg) @@ -212,10 +214,7 @@ python do_package_ipk () { ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces)) if rconflicts: ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts)) - src_uri = localdata.getVar("SRC_URI", True).strip() or "None" - if src_uri: - src_uri = re.sub("\s+", " ", src_uri) - ctrlfile.write("Source: %s\n" % " ".join(src_uri.split())) + ctrlfile.write("Source: %s\n" % recipesource) ctrlfile.close() for script in ["preinst", "postinst", "prerm", "postrm"]: diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass index 4462b52cb..69aae2644 100644 --- 
a/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass +++ b/import-layers/yocto-poky/meta/classes/populate_sdk_base.bbclass @@ -89,11 +89,6 @@ POPULATE_SDK_POST_HOST_COMMAND_append = " write_host_sdk_manifest; " SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}" SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} " -# Some archs override this, we need the nativesdk version -# turns out this is hard to get from the datastore due to TRANSLATED_TARGET_ARCH -# manipulation. -SDK_OLDEST_KERNEL = "3.2.0" - def populate_sdk_common(d): from oe.sdk import populate_sdk from oe.manifest import create_manifest, Manifest @@ -223,7 +218,7 @@ EOF -e 's#@SDKEXTPATH@#${SDKEXTPATH}#g' \ -e 's#@OLDEST_KERNEL@#${SDK_OLDEST_KERNEL}#g' \ -e 's#@REAL_MULTIMACH_TARGET_SYS@#${REAL_MULTIMACH_TARGET_SYS}#g' \ - -e 's#@SDK_TITLE@#${SDK_TITLE}#g' \ + -e 's#@SDK_TITLE@#${@d.getVar("SDK_TITLE", True).replace('&', '\&')}#g' \ -e 's#@SDK_VERSION@#${SDK_VERSION}#g' \ -e '/@SDK_PRE_INSTALL_COMMAND@/d' \ -e '/@SDK_POST_INSTALL_COMMAND@/d' \ diff --git a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass index 0f0525d76..39f614274 100644 --- a/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass +++ b/import-layers/yocto-poky/meta/classes/populate_sdk_ext.bbclass @@ -88,7 +88,7 @@ SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME', True) or d.getVar( def clean_esdk_builddir(d, sdkbasepath): """Clean up traces of the fake build for create_filtered_tasklist()""" import shutil - cleanpaths = 'cache conf/sanity_info conf/templateconf.cfg tmp'.split() + cleanpaths = 'cache conf/sanity_info tmp'.split() for pth in cleanpaths: fullpth = os.path.join(sdkbasepath, pth) if os.path.isdir(fullpth): @@ -305,10 +305,13 @@ python copy_buildsystem () { f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n') # Set up whitelist for run on install - f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work"\n\n') + f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work *:do_package"\n\n') # Hide the config information from bitbake output (since it's fixed within the SDK) - f.write('BUILDCFG_HEADER = ""\n') + f.write('BUILDCFG_HEADER = ""\n\n') + + # Map gcc-dependent uninative sstate cache for installer usage + f.write('SSTATE_MIRRORS = "file://universal/(.*) file://universal-4.9/\\1\\nfile://universal-4.9/(.*) file://universal-4.8/\\1"\n\n') # Allow additional config through sdk-extra.conf fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d) @@ -344,6 +347,10 @@ python copy_buildsystem () { if line.strip() and not line.startswith('#'): f.write(line) + # Write a templateconf.cfg + with open(baseoutpath + '/conf/templateconf.cfg', 'w') as f: + f.write('meta/conf\n') + # Ensure any variables set from the external environment (by way of # BB_ENV_EXTRAWHITE) are set in the SDK's configuration extralines = [] @@ -370,8 +377,9 @@ python copy_buildsystem () { sstate_out = baseoutpath + '/sstate-cache' bb.utils.remove(sstate_out, True) - # uninative.bbclass sets NATIVELSBSTRING to 'universal' - fixedlsbstring = 'universal' + + # uninative.bbclass sets NATIVELSBSTRING to 'universal%s' % oe.utils.host_gcc_version(d) + fixedlsbstring = "universal%s" % oe.utils.host_gcc_version(d) sdk_include_toolchain = (d.getVar('SDK_INCLUDE_TOOLCHAIN', True) == '1') sdk_ext_type = d.getVar('SDK_EXT_TYPE', True) diff --git 
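On the populate_sdk_base.bbclass change above: in a sed s#...#...# replacement an unescaped '&' expands to the whole matched text, so an SDK_TITLE containing '&' would re-insert the "@SDK_TITLE@" placeholder into the generated installer header. The inline Python expression now escapes it before substitution; a small sketch of the same escaping outside the class (the title value is made up):

    title = "Acme & Co SDK"                  # hypothetical SDK_TITLE
    escaped = title.replace("&", "\\&")      # what the new inline expression does
    sed_expr = "s#@SDK_TITLE@#%s#g" % escaped
    # feeding sed_expr to sed now yields "Acme & Co SDK" in the installer header
    # instead of "Acme @SDK_TITLE@ Co SDK".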
a/import-layers/yocto-poky/meta/classes/rm_work.bbclass b/import-layers/yocto-poky/meta/classes/rm_work.bbclass index b71a9d1cf..64b6981a4 100644 --- a/import-layers/yocto-poky/meta/classes/rm_work.bbclass +++ b/import-layers/yocto-poky/meta/classes/rm_work.bbclass @@ -58,7 +58,7 @@ do_rm_work () { *do_setscene*) break ;; - *sigdata*) + *sigdata*|*sigbasedata*) i=dummy break ;; diff --git a/import-layers/yocto-poky/meta/classes/sanity.bbclass b/import-layers/yocto-poky/meta/classes/sanity.bbclass index 7682ffbb8..a11b581a0 100644 --- a/import-layers/yocto-poky/meta/classes/sanity.bbclass +++ b/import-layers/yocto-poky/meta/classes/sanity.bbclass @@ -929,7 +929,9 @@ def check_sanity_everybuild(status, d): # If /bin/sh is a symlink, check that it points to dash or bash if os.path.islink('/bin/sh'): real_sh = os.path.realpath('/bin/sh') - if not real_sh.endswith('/dash') and not real_sh.endswith('/bash'): + # Due to update-alternatives, the shell name may take various + # forms, such as /bin/dash, bin/bash, /bin/bash.bash ... + if '/dash' not in real_sh and '/bash' not in real_sh: status.addresult("Error, /bin/sh links to %s, must be dash or bash\n" % real_sh) def check_sanity(sanity_data): diff --git a/import-layers/yocto-poky/meta/classes/sstate.bbclass b/import-layers/yocto-poky/meta/classes/sstate.bbclass index 172384b37..5b92c5485 100644 --- a/import-layers/yocto-poky/meta/classes/sstate.bbclass +++ b/import-layers/yocto-poky/meta/classes/sstate.bbclass @@ -30,8 +30,6 @@ SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml" # Archive the sources for many architectures in one deploy folder SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}" -# Ignore overlapping README -SSTATE_DUPWHITELIST += "${DEPLOY_DIR}/sdk/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt" SSTATE_SCAN_FILES ?= "*.la *-config *_config" SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f' @@ -457,7 +455,7 @@ def sstate_clean(ss, d): rm_nohash = ".do_%s" % ss['task'] for stfile in glob.glob(wildcard_stfile): # Keep the sigdata - if ".sigdata." in stfile: + if ".sigdata." in stfile or ".sigbasedata." in stfile: continue # Preserve taint files in the stamps directory if stfile.endswith('.taint'): @@ -724,6 +722,8 @@ python sstate_sign_package () { # sstate_unpack_package () { tar -xvzf ${SSTATE_PKG} + # update .siginfo atime on local/NFS mirror + [ -w ${SSTATE_PKG}.siginfo ] && [ -h ${SSTATE_PKG}.siginfo ] && touch -a ${SSTATE_PKG}.siginfo # Use "! -w ||" to return true for read only files [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG} [ ! -w ${SSTATE_PKG}.sig ] || [ ! 
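The sanity.bbclass hunk above relaxes the /bin/sh check because update-alternatives can leave the link pointing at names such as /bin/bash.bash, which the old endswith() test rejected. A standalone sketch of the relaxed test (essentially the new code, minus the build-status plumbing):

    import os

    if os.path.islink('/bin/sh'):
        real_sh = os.path.realpath('/bin/sh')
        # substring match instead of endswith(), so /bin/bash.bash etc. pass
        if '/dash' not in real_sh and '/bash' not in real_sh:
            print("Error, /bin/sh links to %s, must be dash or bash" % real_sh)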
-e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig diff --git a/import-layers/yocto-poky/meta/classes/staging.bbclass b/import-layers/yocto-poky/meta/classes/staging.bbclass index a0b09a00b..bfabd06f3 100644 --- a/import-layers/yocto-poky/meta/classes/staging.bbclass +++ b/import-layers/yocto-poky/meta/classes/staging.bbclass @@ -171,7 +171,6 @@ addtask populate_sysroot after do_install SYSROOT_PREPROCESS_FUNCS ?= "" SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir" -SYSROOT_LOCK = "${STAGING_DIR}/staging.lock" # We clean out any existing sstate from the sysroot if we rerun configure python sysroot_cleansstate () { diff --git a/import-layers/yocto-poky/meta/classes/systemd.bbclass b/import-layers/yocto-poky/meta/classes/systemd.bbclass index d56c760a1..4ea1f45e9 100644 --- a/import-layers/yocto-poky/meta/classes/systemd.bbclass +++ b/import-layers/yocto-poky/meta/classes/systemd.bbclass @@ -32,7 +32,7 @@ if type systemctl >/dev/null 2>/dev/null; then systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE} if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then - systemctl restart ${SYSTEMD_SERVICE} + systemctl --no-block restart ${SYSTEMD_SERVICE} fi fi } diff --git a/import-layers/yocto-poky/meta/classes/testsdk.bbclass b/import-layers/yocto-poky/meta/classes/testsdk.bbclass index 77c9203cf..43342b1f2 100644 --- a/import-layers/yocto-poky/meta/classes/testsdk.bbclass +++ b/import-layers/yocto-poky/meta/classes/testsdk.bbclass @@ -4,13 +4,15 @@ # testsdk.bbclass enables testing for SDK and Extensible SDK # -# For run SDK tests you need to do, -# - bitbake core-image-sato -c populate_sdk -# - bitbake core-image-sato -c testsdk +# To run SDK tests, run the commands: +# $ bitbake <image-name> -c populate_sdk +# $ bitbake <image-name> -c testsdk # -# For run eSDK tests you need to do, -# - bitbake core-image-sato -c populate_sdk_ext -# - bitbake core-image-sato -c testsdkext +# To run eSDK tests, run the commands: +# $ bitbake <image-name> -c populate_sdk_ext +# $ bitbake <image-name> -c testsdkext +# +# where "<image-name>" is an image like core-image-sato. 
TEST_LOG_DIR ?= "${WORKDIR}/testimage" TESTSDKLOCK = "${TMPDIR}/testsdk.lock" diff --git a/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass b/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass index 3c56db887..cef26b19b 100644 --- a/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass +++ b/import-layers/yocto-poky/meta/classes/uboot-sign.bbclass @@ -68,8 +68,8 @@ do_concat_dtb () { [ -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then cd ${B} oe_runmake EXT_DTB=${DEPLOYDIR}/${UBOOT_DTB_IMAGE} - install ${S}/${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE} - install ${S}/${UBOOT_BINARY} ${DEPLOY_DIR_IMAGE}/${UBOOT_IMAGE} + install ${B}/${UBOOT_BINARY} ${DEPLOYDIR}/${UBOOT_IMAGE} + install ${B}/${UBOOT_BINARY} ${DEPLOY_DIR_IMAGE}/${UBOOT_IMAGE} elif [ -e "${DEPLOYDIR}/${UBOOT_NODTB_IMAGE}" -a -e "${DEPLOYDIR}/${UBOOT_DTB_IMAGE}" ]; then cd ${DEPLOYDIR} cat ${UBOOT_NODTB_IMAGE} ${UBOOT_DTB_IMAGE} | tee ${B}/${UBOOT_BINARY} > ${UBOOT_IMAGE} diff --git a/import-layers/yocto-poky/meta/classes/uninative.bbclass b/import-layers/yocto-poky/meta/classes/uninative.bbclass index 89cec07d7..975466929 100644 --- a/import-layers/yocto-poky/meta/classes/uninative.bbclass +++ b/import-layers/yocto-poky/meta/classes/uninative.bbclass @@ -1,4 +1,5 @@ -UNINATIVE_LOADER ?= "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', 'ld-linux.so.2', d)}" +UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', 'ld-linux.so.2', d)}" +UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}" UNINATIVE_URL ?= "unset" UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2" @@ -7,17 +8,6 @@ UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2" #UNINATIVE_CHECKSUM[x86_64] = "dead" UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/" -# https://wiki.debian.org/GCC5 -# We may see binaries built with gcc5 run or linked into gcc4 environment -# so use the older libstdc++ standard for now until we don't support gcc4 -# on the host system. 
-BUILD_CXXFLAGS_append = " -D_GLIBCXX_USE_CXX11_ABI=0" - -# -# icu configure defaults to CXX11 if no -std= option is passed in CXXFLAGS -# therefore pass one -BUILD_CXXFLAGS_append_pn-icu-native = " -std=c++98" - addhandler uninative_event_fetchloader uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted" @@ -69,7 +59,7 @@ python uninative_event_fetchloader() { if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath): os.symlink(localpath, tarballpath) - cmd = d.expand("mkdir -p ${STAGING_DIR}-uninative; cd ${STAGING_DIR}-uninative; tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; ${STAGING_DIR}-uninative/relocate_sdk.py ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum) + cmd = d.expand("mkdir -p ${UNINATIVE_STAGING_DIR}-uninative; cd ${UNINATIVE_STAGING_DIR}-uninative; tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; ${UNINATIVE_STAGING_DIR}-uninative/relocate_sdk.py ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative ${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum) subprocess.check_call(cmd, shell=True) with open(loaderchksum, "w") as f: @@ -99,9 +89,9 @@ def enable_uninative(d): loader = d.getVar("UNINATIVE_LOADER", True) if os.path.exists(loader): bb.debug(2, "Enabling uninative") - d.setVar("NATIVELSBSTRING", "universal") + d.setVar("NATIVELSBSTRING", "universal%s" % oe.utils.host_gcc_version(d)) d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp") - d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:") + d.prependVar("PATH", "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:") python uninative_changeinterp () { import subprocess diff --git a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass index 1fdd68131..65929e555 100644 --- a/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass +++ b/import-layers/yocto-poky/meta/classes/update-alternatives.bbclass @@ -195,8 +195,8 @@ python populate_packages_updatealternatives () { pkgdest = d.getVar('PKGD', True) for pkg in (d.getVar('PACKAGES', True) or "").split(): # Create post install/removal scripts - alt_setup_links = "" - alt_remove_links = "" + alt_setup_links = "# Begin section update-alternatives\n" + alt_remove_links = "# Begin section update-alternatives\n" for alt_name in (d.getVar('ALTERNATIVE_%s' % pkg, True) or "").split(): alt_link = d.getVarFlag('ALTERNATIVE_LINK_NAME', alt_name, True) alt_target = d.getVarFlag('ALTERNATIVE_TARGET_%s' % pkg, alt_name, True) or d.getVarFlag('ALTERNATIVE_TARGET', alt_name, True) @@ -219,10 +219,13 @@ python populate_packages_updatealternatives () { # Default to generate shell script.. eventually we may want to change this... 
alt_target = os.path.normpath(alt_target) - alt_setup_links += '\tupdate-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority) - alt_remove_links += '\tupdate-alternatives --remove %s %s\n' % (alt_name, alt_target) + alt_setup_links += 'update-alternatives --install %s %s %s %s\n' % (alt_link, alt_name, alt_target, alt_priority) + alt_remove_links += 'update-alternatives --remove %s %s\n' % (alt_name, alt_target) - if alt_setup_links: + alt_setup_links += "# End section update-alternatives\n" + alt_remove_links += "# End section update-alternatives\n" + + if len(alt_setup_links.splitlines()) > 2: # RDEPENDS setup provider = d.getVar('VIRTUAL-RUNTIME_update-alternatives', True) if provider: @@ -232,12 +235,24 @@ python populate_packages_updatealternatives () { bb.note('adding update-alternatives calls to postinst/prerm for %s' % pkg) bb.note('%s' % alt_setup_links) postinst = d.getVar('pkg_postinst_%s' % pkg, True) or '#!/bin/sh\n' - postinst += alt_setup_links + postinst = postinst.splitlines(True) + try: + index = postinst.index('# Begin section update-rc.d\n') + postinst.insert(index, alt_setup_links) + except ValueError: + postinst.append(alt_setup_links) + postinst = ''.join(postinst) d.setVar('pkg_postinst_%s' % pkg, postinst) bb.note('%s' % alt_remove_links) prerm = d.getVar('pkg_prerm_%s' % pkg, True) or '#!/bin/sh\n' - prerm += alt_remove_links + prerm = prerm.splitlines(True) + try: + index = prerm.index('# End section update-rc.d\n') + prerm.insert(index + 1, alt_remove_links) + except ValueError: + prerm.append(alt_remove_links) + prerm = ''.join(prerm) d.setVar('pkg_prerm_%s' % pkg, prerm) } diff --git a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass index 321924bb3..18df2dc3f 100644 --- a/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass +++ b/import-layers/yocto-poky/meta/classes/update-rc.d.bbclass @@ -26,6 +26,7 @@ fi } updatercd_postinst() { +# Begin section update-rc.d if type update-rc.d >/dev/null 2>/dev/null; then if [ -n "$D" ]; then OPT="-r $D" @@ -34,12 +35,15 @@ if type update-rc.d >/dev/null 2>/dev/null; then fi update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS} fi +# End section update-rc.d } updatercd_prerm() { +# Begin section update-rc.d if [ -z "$D" -a -x "${INIT_D_DIR}/${INITSCRIPT_NAME}" ]; then ${INIT_D_DIR}/${INITSCRIPT_NAME} stop || : fi +# End section update-rc.d } updatercd_postrm() { @@ -102,13 +106,25 @@ python populate_packages_updatercd () { postinst = d.getVar('pkg_postinst_%s' % pkg, True) if not postinst: postinst = '#!/bin/sh\n' - postinst += localdata.getVar('updatercd_postinst', True) + postinst = postinst.splitlines(True) + try: + index = postinst.index('# End section update-alternatives\n') + postinst.insert(index + 1, localdata.getVar('updatercd_postinst', True)) + except ValueError: + postinst.append(localdata.getVar('updatercd_postinst', True)) + postinst = ''.join(postinst) d.setVar('pkg_postinst_%s' % pkg, postinst) prerm = d.getVar('pkg_prerm_%s' % pkg, True) if not prerm: prerm = '#!/bin/sh\n' - prerm += localdata.getVar('updatercd_prerm', True) + prerm = prerm.splitlines(True) + try: + index = prerm.index('# Begin section update-alternatives\n') + prerm.insert(index, localdata.getVar('updatercd_prerm', True)) + except ValueError: + prerm.append(localdata.getVar('updatercd_prerm', True)) + prerm = ''.join(prerm) d.setVar('pkg_prerm_%s' % pkg, prerm) postrm = d.getVar('pkg_postrm_%s' % pkg, True) diff --git 
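The paired update-alternatives.bbclass / update-rc.d.bbclass changes above wrap each generated script fragment in '# Begin/End section ...' markers and splice the fragments relative to one another, so a package's postinst installs its alternatives before the init script is started and its prerm stops the service before the alternatives are removed. A rough sketch of the postinst splice with made-up script contents:

    postinst = ("#!/bin/sh\n"
                "# Begin section update-rc.d\n"
                "update-rc.d $OPT myservice defaults\n"
                "# End section update-rc.d\n")
    alt_setup_links = ("# Begin section update-alternatives\n"
                       "update-alternatives --install /usr/bin/vi vi /usr/bin/vim.tiny 100\n"
                       "# End section update-alternatives\n")

    lines = postinst.splitlines(True)
    try:
        # put the alternatives block just before the update-rc.d section
        index = lines.index('# Begin section update-rc.d\n')
        lines.insert(index, alt_setup_links)
    except ValueError:
        lines.append(alt_setup_links)        # no update-rc.d section in this package
    postinst = ''.join(lines)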
a/import-layers/yocto-poky/meta/conf/bitbake.conf b/import-layers/yocto-poky/meta/conf/bitbake.conf index 54a587f1b..6e767b1bc 100644 --- a/import-layers/yocto-poky/meta/conf/bitbake.conf +++ b/import-layers/yocto-poky/meta/conf/bitbake.conf @@ -408,6 +408,11 @@ OLDEST_KERNEL = "3.2.0" OLDEST_KERNEL_aarch64 = "3.14" OLDEST_KERNEL_nios2 = "3.19" +# SDK_OLDEST_KERNEL can't be set using overrides since there are +# none for the SDK architecture. Best to set it from a machine-sdk +# include file if you need an SDK arch-specific value +SDK_OLDEST_KERNEL = "3.2.0" + # Define where the kernel headers are installed on the target as well as where # they are staged. KERNEL_SRC_PATH = "/usr/src/kernel" @@ -580,7 +585,7 @@ BBLAYERS_FETCH_DIR ??= "${COREBASE}" ################################################################## APACHE_MIRROR = "http://archive.apache.org/dist" -DEBIAN_MIRROR = "ftp://ftp.debian.org/debian/pool" +DEBIAN_MIRROR = "http://ftp.debian.org/debian/pool" GENTOO_MIRROR = "http://distfiles.gentoo.org/distfiles" GNOME_GIT = "git://git.gnome.org" GNOME_MIRROR = "http://ftp.gnome.org/pub/GNOME/sources" @@ -614,7 +619,7 @@ SRC_URI[vardepsexclude] += "\ " # You can use the mirror of your country to get faster downloads by putting -# export DEBIAN_MIRROR = "ftp://ftp.de.debian.org/debian/pool" +# export DEBIAN_MIRROR = "http://ftp.de.debian.org/debian/pool" # into your local.conf FETCHCMD_svn = "/usr/bin/env svn --non-interactive --trust-server-cert" @@ -705,6 +710,7 @@ require conf/abi_version.conf include conf/site.conf include conf/auto.conf include conf/local.conf +include conf/multiconfig/${BB_CURRENT_MC}.conf include conf/build/${BUILD_SYS}.conf include conf/target/${TARGET_SYS}.conf include conf/machine/${MACHINE}.conf diff --git a/import-layers/yocto-poky/meta/conf/distro/defaultsetup.conf b/import-layers/yocto-poky/meta/conf/distro/defaultsetup.conf index 71c65b146..aa21345a1 100644 --- a/import-layers/yocto-poky/meta/conf/distro/defaultsetup.conf +++ b/import-layers/yocto-poky/meta/conf/distro/defaultsetup.conf @@ -9,6 +9,8 @@ require conf/distro/include/tcmode-${TCMODE}.inc TCLIBC ?= "glibc" require conf/distro/include/tclibc-${TCLIBC}.inc +require conf/distro/include/uninative-flags.inc + # Allow single libc distros to disable this code TCLIBCAPPEND ?= "-${TCLIBC}" TMPDIR .= "${TCLIBCAPPEND}" @@ -20,4 +22,3 @@ PACKAGE_CLASSES ?= "package_ipk" INHERIT_BLACKLIST = "blacklist" INHERIT_DISTRO ?= "debian devshell sstate license" INHERIT += "${PACKAGE_CLASSES} ${USER_CLASSES} ${INHERIT_DISTRO} ${INHERIT_BLACKLIST}" - diff --git a/import-layers/yocto-poky/meta/conf/distro/include/uninative-flags.inc b/import-layers/yocto-poky/meta/conf/distro/include/uninative-flags.inc new file mode 100644 index 000000000..e9f82c39e --- /dev/null +++ b/import-layers/yocto-poky/meta/conf/distro/include/uninative-flags.inc @@ -0,0 +1,9 @@ +# https://wiki.debian.org/GCC5 +# We may see binaries built with gcc5 run or linked into gcc4 environment +# so use the older libstdc++ standard for now until we don't support gcc4 +# on the host system. 
+BUILD_CXXFLAGS_append = " -D_GLIBCXX_USE_CXX11_ABI=0" + +# icu configure defaults to CXX11 if no -std= option is passed in CXXFLAGS +# therefore pass one +BUILD_CXXFLAGS_append_pn-icu-native = " -std=c++98" diff --git a/import-layers/yocto-poky/meta/conf/distro/include/yocto-uninative.inc b/import-layers/yocto-poky/meta/conf/distro/include/yocto-uninative.inc index 975edec9c..839c19aa5 100644 --- a/import-layers/yocto-poky/meta/conf/distro/include/yocto-uninative.inc +++ b/import-layers/yocto-poky/meta/conf/distro/include/yocto-uninative.inc @@ -6,6 +6,6 @@ # to the distro running on the build machine. # -UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/1.4/" -UNINATIVE_CHECKSUM[i686] ?= "b4bc60511ce479736808273ffa043df4ed2a225407dd7ca150ae6220d9ce76d5" -UNINATIVE_CHECKSUM[x86_64] ?= "101ff8f2580c193488db9e76f9646fb6ed38b65fb76f403acb0e2178ce7127ca" +UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/1.7/" +UNINATIVE_CHECKSUM[i686] ?= "d7c341460035936c19d63fe02f354ef1bc993c62d694ae3a31458d1c6997f0c5" +UNINATIVE_CHECKSUM[x86_64] ?= "ed033c868b87852b07957a4400f3b744c00aef5d6470346ea1a59b6d3e03075e" diff --git a/import-layers/yocto-poky/meta/conf/machine-sdk/i586.conf b/import-layers/yocto-poky/meta/conf/machine-sdk/i586.conf index 41e5e159d..99083fb63 100644 --- a/import-layers/yocto-poky/meta/conf/machine-sdk/i586.conf +++ b/import-layers/yocto-poky/meta/conf/machine-sdk/i586.conf @@ -1,4 +1,5 @@ SDK_ARCH = "i586" SDK_CC_ARCH = "-march=i586" ABIEXTENSION_class-nativesdk = "" +SDK_OLDEST_KERNEL = "2.6.32" diff --git a/import-layers/yocto-poky/meta/conf/machine-sdk/i686.conf b/import-layers/yocto-poky/meta/conf/machine-sdk/i686.conf index fe406972c..cf22784e9 100644 --- a/import-layers/yocto-poky/meta/conf/machine-sdk/i686.conf +++ b/import-layers/yocto-poky/meta/conf/machine-sdk/i686.conf @@ -1,3 +1,4 @@ SDK_ARCH = "i686" SDK_CC_ARCH = "-march=i686" ABIEXTENSION_class-nativesdk = "" +SDK_OLDEST_KERNEL = "2.6.32" diff --git a/import-layers/yocto-poky/meta/conf/machine-sdk/x86_64.conf b/import-layers/yocto-poky/meta/conf/machine-sdk/x86_64.conf index 61439b4fb..7d2e71780 100644 --- a/import-layers/yocto-poky/meta/conf/machine-sdk/x86_64.conf +++ b/import-layers/yocto-poky/meta/conf/machine-sdk/x86_64.conf @@ -1,2 +1,3 @@ SDK_ARCH = "x86_64" ABIEXTENSION_class-nativesdk = "" +SDK_OLDEST_KERNEL = "2.6.32" diff --git a/import-layers/yocto-poky/meta/conf/machine/include/arm/arch-arm64.inc b/import-layers/yocto-poky/meta/conf/machine/include/arm/arch-arm64.inc index 944069836..9eeffac81 100644 --- a/import-layers/yocto-poky/meta/conf/machine/include/arm/arch-arm64.inc +++ b/import-layers/yocto-poky/meta/conf/machine/include/arm/arch-arm64.inc @@ -1,6 +1,6 @@ DEFAULTTUNE ?= "aarch64" -require conf/machine/include/arm/arch-armv7a.inc +require conf/machine/include/arm/arch-armv7ve.inc TUNEVALID[aarch64] = "Enable instructions for aarch64" diff --git a/import-layers/yocto-poky/meta/conf/machine/qemuarm.conf b/import-layers/yocto-poky/meta/conf/machine/qemuarm.conf index 17402ef3b..f9d6dd7e3 100644 --- a/import-layers/yocto-poky/meta/conf/machine/qemuarm.conf +++ b/import-layers/yocto-poky/meta/conf/machine/qemuarm.conf @@ -8,7 +8,7 @@ require conf/machine/include/tune-arm926ejs.inc KERNEL_IMAGETYPE = "zImage" -SERIAL_CONSOLES = "115200;ttyAMA0 115200;ttyAMA1" +SERIAL_CONSOLES ?= "115200;ttyAMA0 115200;ttyAMA1" # For runqemu QB_SYSTEM_NAME = "qemu-system-arm" diff --git a/import-layers/yocto-poky/meta/conf/machine/qemuarm64.conf 
b/import-layers/yocto-poky/meta/conf/machine/qemuarm64.conf index d0750624a..e70538aac 100644 --- a/import-layers/yocto-poky/meta/conf/machine/qemuarm64.conf +++ b/import-layers/yocto-poky/meta/conf/machine/qemuarm64.conf @@ -7,7 +7,7 @@ require conf/machine/include/qemu.inc KERNEL_IMAGETYPE = "Image" -SERIAL_CONSOLES = "38400;ttyAMA0 38400;hvc0" +SERIAL_CONSOLES ?= "38400;ttyAMA0 38400;hvc0" # For runqemu QB_SYSTEM_NAME = "qemu-system-aarch64" @@ -16,7 +16,7 @@ QB_MACHINE = "-machine virt" QB_CPU = "-cpu cortex-a57" QB_KERNEL_CMDLINE_APPEND = "console=ttyAMA0,38400" # Add the 'virtio-rng-pci' device otherwise the guest may run out of entropy -QB_OPT_APPEND = "-show-cursor -device virtio-rng-pci" +QB_OPT_APPEND = "-show-cursor -device virtio-rng-pci -monitor null" QB_TAP_OPT = "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no -device virtio-net-device,netdev=net0,mac=@MAC@" QB_SLIRP_OPT = "-netdev user,id=net0 -device virtio-net-device,netdev=net0" QB_ROOTFS_OPT = "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0" diff --git a/import-layers/yocto-poky/meta/conf/machine/qemumips.conf b/import-layers/yocto-poky/meta/conf/machine/qemumips.conf index 3182ea1b5..986315a64 100644 --- a/import-layers/yocto-poky/meta/conf/machine/qemumips.conf +++ b/import-layers/yocto-poky/meta/conf/machine/qemumips.conf @@ -9,6 +9,6 @@ require conf/machine/include/qemuboot-mips.inc KERNEL_IMAGETYPE = "vmlinux" KERNEL_ALT_IMAGETYPE = "vmlinux.bin" -SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" +SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyS1" MACHINE_EXTRA_RRECOMMENDS = " kernel-modules" diff --git a/import-layers/yocto-poky/meta/conf/machine/qemumips64.conf b/import-layers/yocto-poky/meta/conf/machine/qemumips64.conf index 9529f4c4f..3f91cbe17 100644 --- a/import-layers/yocto-poky/meta/conf/machine/qemumips64.conf +++ b/import-layers/yocto-poky/meta/conf/machine/qemumips64.conf @@ -9,6 +9,6 @@ require conf/machine/include/qemuboot-mips.inc KERNEL_IMAGETYPE = "vmlinux" KERNEL_ALT_IMAGETYPE = "vmlinux.bin" -SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" +SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyS1" MACHINE_EXTRA_RRECOMMENDS = " kernel-modules" diff --git a/import-layers/yocto-poky/meta/conf/machine/qemuppc.conf b/import-layers/yocto-poky/meta/conf/machine/qemuppc.conf index 8703c2086..9d174bc43 100644 --- a/import-layers/yocto-poky/meta/conf/machine/qemuppc.conf +++ b/import-layers/yocto-poky/meta/conf/machine/qemuppc.conf @@ -9,7 +9,7 @@ TARGET_CC_KERNEL_ARCH = "-mno-spe" KERNEL_IMAGETYPE = "vmlinux" -SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" +SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyS1" # For runqemu QB_SYSTEM_NAME = "qemu-system-ppc" diff --git a/import-layers/yocto-poky/meta/conf/machine/qemux86-64.conf b/import-layers/yocto-poky/meta/conf/machine/qemux86-64.conf index b191ddd3c..4f30033e5 100644 --- a/import-layers/yocto-poky/meta/conf/machine/qemux86-64.conf +++ b/import-layers/yocto-poky/meta/conf/machine/qemux86-64.conf @@ -14,7 +14,7 @@ require conf/machine/include/qemuboot-x86.inc KERNEL_IMAGETYPE = "bzImage" -SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" +SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyS1" XSERVER = "xserver-xorg \ ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast xserver-xorg-extension-glx', '', d)} \ @@ -31,3 +31,6 @@ XSERVER = "xserver-xorg \ MACHINE_FEATURES += "x86" MACHINE_ESSENTIAL_EXTRA_RDEPENDS += "v86d" + +WKS_FILE ?= "directdisk.wks" +do_image_wic[depends] += "syslinux:do_build 
syslinux-native:do_populate_sysroot mtools-native:do_populate_sysroot dosfstools-native:do_populate_sysroot" diff --git a/import-layers/yocto-poky/meta/conf/machine/qemux86.conf b/import-layers/yocto-poky/meta/conf/machine/qemux86.conf index 8555fd6bf..e232947ae 100644 --- a/import-layers/yocto-poky/meta/conf/machine/qemux86.conf +++ b/import-layers/yocto-poky/meta/conf/machine/qemux86.conf @@ -13,7 +13,7 @@ require conf/machine/include/qemuboot-x86.inc KERNEL_IMAGETYPE = "bzImage" -SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" +SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyS1" XSERVER = "xserver-xorg \ ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast xserver-xorg-extension-glx', '', d)} \ @@ -30,3 +30,6 @@ XSERVER = "xserver-xorg \ MACHINE_FEATURES += "x86" MACHINE_ESSENTIAL_EXTRA_RDEPENDS += "v86d" + +WKS_FILE = "directdisk.wks" +do_image_wic[depends] += "syslinux:do_build syslinux-native:do_populate_sysroot mtools-native:do_populate_sysroot dosfstools-native:do_populate_sysroot" diff --git a/import-layers/yocto-poky/meta/files/deploydir_readme.txt b/import-layers/yocto-poky/meta/files/deploydir_readme.txt deleted file mode 100644 index 97ec1855f..000000000 --- a/import-layers/yocto-poky/meta/files/deploydir_readme.txt +++ /dev/null @@ -1,8 +0,0 @@ -Files in the deploy directory will not be re-created automatically if you -delete them. If you do delete a file, you will need to run: - - bitbake -c clean TARGET - bitbake TARGET - -where TARGET is the name of the appropriate package or target e.g. -"virtual/kernel" for the kernel, an image, etc. diff --git a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py index afaff6859..29ac6d418 100644 --- a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py +++ b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py @@ -4,11 +4,15 @@ import stat import shutil def _smart_copy(src, dest): + import subprocess # smart_copy will choose the correct function depending on whether the # source is a file or a directory. mode = os.stat(src).st_mode if stat.S_ISDIR(mode): - shutil.copytree(src, dest, symlinks=True, ignore=shutil.ignore_patterns('.git')) + bb.utils.mkdirhier(dest) + cmd = "tar --exclude='.git' --xattrs --xattrs-include='*' -chf - -C %s -p . 
\ + | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dest) + subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) else: shutil.copyfile(src, dest) shutil.copymode(src, dest) diff --git a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py index 38eb0cb13..ba61f9890 100644 --- a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py +++ b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py @@ -10,6 +10,7 @@ class LocalSigner(object): self.gpg_bin = d.getVar('GPG_BIN', True) or \ bb.utils.which(os.getenv('PATH'), 'gpg') self.gpg_path = d.getVar('GPG_PATH', True) + self.gpg_version = self.get_gpg_version() self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpm") def export_pubkey(self, output_file, keyid, armor=True): @@ -31,15 +32,18 @@ class LocalSigner(object): cmd = self.rpm_bin + " --addsign --define '_gpg_name %s' " % keyid cmd += "--define '_gpg_passphrase %s' " % passphrase + if self.gpg_version > (2,1,): + cmd += "--define '_gpg_sign_cmd_extra_args --pinentry-mode=loopback' " if self.gpg_bin: cmd += "--define '%%__gpg %s' " % self.gpg_bin if self.gpg_path: cmd += "--define '_gpg_path %s' " % self.gpg_path - cmd += ' '.join(files) - status, output = oe.utils.getstatusoutput(cmd) - if status: - raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output) + # Sign in chunks of 100 packages + for i in range(0, len(files), 100): + status, output = oe.utils.getstatusoutput(cmd + ' '.join(files[i:i+100])) + if status: + raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output) def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True): """Create a detached signature of a file""" @@ -58,9 +62,7 @@ class LocalSigner(object): #gpg > 2.1 supports password pipes only through the loopback interface #gpg < 2.1 errors out if given unknown parameters - dots = self.get_gpg_version().split('.') - assert len(dots) >= 2 - if int(dots[0]) >= 2 and int(dots[1]) >= 1: + if self.gpg_version > (2,1,): cmd += ['--pinentry-mode', 'loopback'] cmd += [input_file] @@ -87,10 +89,11 @@ class LocalSigner(object): def get_gpg_version(self): - """Return the gpg version""" + """Return the gpg version as a tuple of ints""" import subprocess try: - return subprocess.check_output((self.gpg_bin, "--version")).split()[2].decode("utf-8") + ver_str = subprocess.check_output((self.gpg_bin, "--version")).split()[2].decode("utf-8") + return tuple([int(i) for i in ver_str.split('.')]) except subprocess.CalledProcessError as e: raise bb.build.FuncFailed("Could not get gpg version: %s" % e) diff --git a/import-layers/yocto-poky/meta/lib/oe/package_manager.py b/import-layers/yocto-poky/meta/lib/oe/package_manager.py index 3cee9730a..13577b18b 100644 --- a/import-layers/yocto-poky/meta/lib/oe/package_manager.py +++ b/import-layers/yocto-poky/meta/lib/oe/package_manager.py @@ -1673,13 +1673,15 @@ class OpkgPM(OpkgDpkgPM): self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True), arch)) - if self.opkg_dir != '/var/lib/opkg': + if self.d.getVar('OPKGLIBDIR', True) != '/var/lib': # There is no command line option for this anymore, we need to add # info_dir and status_file to config file, if OPKGLIBDIR doesn't have # the default value of "/var/lib" as defined in opkg: - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" + # 
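gpg_sign.py above now parses the gpg version once into a tuple of ints, so checks like "gpg >= 2.1" become a plain tuple comparison, and it signs RPMs in slices of 100 files (range(0, len(files), 100)) to keep the rpm command line bounded. A sketch of the version handling (assumes a gpg binary on PATH):

    import subprocess

    def get_gpg_version(gpg_bin="gpg"):
        # e.g. "gpg (GnuPG) 2.2.27" -> (2, 2, 27)
        ver_str = subprocess.check_output((gpg_bin, "--version")).split()[2].decode("utf-8")
        return tuple(int(i) for i in ver_str.split('.'))

    extra_args = []
    if get_gpg_version() > (2, 1):
        # password pipes only work through the loopback interface on gpg >= 2.1
        extra_args += ["--pinentry-mode", "loopback"]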
libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info')) + cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'lists')) cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status')) @@ -1698,13 +1700,15 @@ class OpkgPM(OpkgDpkgPM): config_file.write("src oe-%s file:%s\n" % (arch, pkgs_dir)) - if self.opkg_dir != '/var/lib/opkg': + if self.d.getVar('OPKGLIBDIR', True) != '/var/lib': # There is no command line option for this anymore, we need to add # info_dir and status_file to config file, if OPKGLIBDIR doesn't have # the default value of "/var/lib" as defined in opkg: - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR "/var/lib/opkg/info" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE "/var/lib/opkg/status" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info')) + config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'lists')) config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status')) def insert_feeds_uris(self): @@ -1776,7 +1780,7 @@ class OpkgPM(OpkgDpkgPM): def remove(self, pkgs, with_dependencies=True): if with_dependencies: - cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \ + cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \ (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) else: cmd = "%s %s --force-depends remove %s" % \ @@ -1860,7 +1864,10 @@ class OpkgPM(OpkgDpkgPM): # Create an temp dir as opkg root for dummy installation temp_rootfs = self.d.expand('${T}/opkg') - temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg') + opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True) + if opkg_lib_dir[0] == "/": + opkg_lib_dir = opkg_lib_dir[1:] + temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg') bb.utils.mkdirhier(temp_opkg_dir) opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) diff --git a/import-layers/yocto-poky/meta/lib/oe/path.py b/import-layers/yocto-poky/meta/lib/oe/path.py index 06a5af265..ed7fd1eef 100644 --- a/import-layers/yocto-poky/meta/lib/oe/path.py +++ b/import-layers/yocto-poky/meta/lib/oe/path.py @@ -83,12 +83,14 @@ def copyhardlinktree(src, dst): if os.path.isdir(src): import glob if len(glob.glob('%s/.??*' % src)) > 0: - source = '%s/.??* ' % src - source = source + '%s/*' % src + source = './.??* ' + source += './*' + s_dir = src else: source = src - cmd = 'cp -afl --preserve=xattr %s %s' % (source, dst) - subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + s_dir = os.getcwd() + cmd = 'cp -afl --preserve=xattr %s %s' % (source, os.path.realpath(dst)) + subprocess.check_output(cmd, shell=True, cwd=s_dir, stderr=subprocess.STDOUT) else: copytree(src, dst) diff --git a/import-layers/yocto-poky/meta/lib/oe/qa.py b/import-layers/yocto-poky/meta/lib/oe/qa.py index fbe719d8e..22d76dcbc 100644 --- 
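package_manager.py above keys the extra opkg config options off OPKGLIBDIR rather than the derived opkg_dir, and builds the dummy-install root from OPKGLIBDIR as well; the leading "/" has to be stripped because os.path.join() discards every earlier component once it meets an absolute path. A small demonstration with illustrative paths:

    import os

    temp_rootfs = "/tmp/work/opkg-root"      # stands in for ${T}/opkg
    opkg_lib_dir = "/var/lib"                # OPKGLIBDIR is an absolute path
    # joining the absolute value escapes the temporary rootfs entirely...
    assert os.path.join(temp_rootfs, opkg_lib_dir, "opkg") == "/var/lib/opkg"
    # ...while stripping the leading "/" keeps the path inside it
    stripped = opkg_lib_dir.lstrip("/")
    assert os.path.join(temp_rootfs, stripped, "opkg") == "/tmp/work/opkg-root/var/lib/opkg"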
a/import-layers/yocto-poky/meta/lib/oe/qa.py +++ b/import-layers/yocto-poky/meta/lib/oe/qa.py @@ -1,4 +1,4 @@ -import os, struct +import os, struct, mmap class NotELFFileError(Exception): pass @@ -23,9 +23,9 @@ class ELFFile: EV_CURRENT = 1 # possible values for EI_DATA - ELFDATANONE = 0 - ELFDATA2LSB = 1 - ELFDATA2MSB = 2 + EI_DATA_NONE = 0 + EI_DATA_LSB = 1 + EI_DATA_MSB = 2 PT_INTERP = 3 @@ -34,51 +34,46 @@ class ELFFile: #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name) raise NotELFFileError("%s is not an ELF" % self.name) - def __init__(self, name, bits = 0): + def __init__(self, name): self.name = name - self.bits = bits self.objdump_output = {} - def open(self): - if not os.path.isfile(self.name): - raise NotELFFileError("%s is not a normal file" % self.name) + # Context Manager functions to close the mmap explicitly + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.data.close() + def open(self): with open(self.name, "rb") as f: - # Read 4k which should cover most of the headers we're after - self.data = f.read(4096) + try: + self.data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) + except ValueError: + # This means the file is empty + raise NotELFFileError("%s is empty" % self.name) + # Check the file has the minimum number of ELF table entries if len(self.data) < ELFFile.EI_NIDENT + 4: raise NotELFFileError("%s is not an ELF" % self.name) + # ELF header self.my_assert(self.data[0], 0x7f) self.my_assert(self.data[1], ord('E')) self.my_assert(self.data[2], ord('L')) self.my_assert(self.data[3], ord('F')) - if self.bits == 0: - if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32: - self.bits = 32 - elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64: - self.bits = 64 - else: - # Not 32-bit or 64.. lets assert - raise NotELFFileError("ELF but not 32 or 64 bit.") - elif self.bits == 32: - self.my_assert(self.data[ELFFile.EI_CLASS], ELFFile.ELFCLASS32) - elif self.bits == 64: - self.my_assert(self.data[ELFFile.EI_CLASS], ELFFile.ELFCLASS64) + if self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS32: + self.bits = 32 + elif self.data[ELFFile.EI_CLASS] == ELFFile.ELFCLASS64: + self.bits = 64 else: - raise NotELFFileError("Must specify unknown, 32 or 64 bit size.") + # Not 32-bit or 64.. 
lets assert + raise NotELFFileError("ELF but not 32 or 64 bit.") self.my_assert(self.data[ELFFile.EI_VERSION], ELFFile.EV_CURRENT) - self.sex = self.data[ELFFile.EI_DATA] - if self.sex == ELFFile.ELFDATANONE: - raise NotELFFileError("self.sex == ELFDATANONE") - elif self.sex == ELFFile.ELFDATA2LSB: - self.sex = "<" - elif self.sex == ELFFile.ELFDATA2MSB: - self.sex = ">" - else: - raise NotELFFileError("Unknown self.sex") + self.endian = self.data[ELFFile.EI_DATA] + if self.endian not in (ELFFile.EI_DATA_LSB, ELFFile.EI_DATA_MSB): + raise NotELFFileError("Unexpected EI_DATA %x" % self.endian) def osAbi(self): return self.data[ELFFile.EI_OSABI] @@ -90,16 +85,20 @@ class ELFFile: return self.bits def isLittleEndian(self): - return self.sex == "<" + return self.endian == ELFFile.EI_DATA_LSB def isBigEndian(self): - return self.sex == ">" + return self.endian == ELFFile.EI_DATA_MSB + + def getStructEndian(self): + return {ELFFile.EI_DATA_LSB: "<", + ELFFile.EI_DATA_MSB: ">"}[self.endian] def getShort(self, offset): - return struct.unpack_from(self.sex+"H", self.data, offset)[0] + return struct.unpack_from(self.getStructEndian() + "H", self.data, offset)[0] def getWord(self, offset): - return struct.unpack_from(self.sex+"i", self.data, offset)[0] + return struct.unpack_from(self.getStructEndian() + "i", self.data, offset)[0] def isDynamic(self): """ @@ -118,7 +117,7 @@ class ELFFile: def machine(self): """ - We know the sex stored in self.sex and we + We know the endian stored in self.endian and we know the position """ return self.getShort(ELFFile.E_MACHINE) @@ -166,6 +165,7 @@ def elf_machine_to_string(machine): if __name__ == "__main__": import sys - elf = ELFFile(sys.argv[1]) - elf.open() - print(elf.isDynamic()) + + with ELFFile(sys.argv[1]) as elf: + elf.open() + print(elf.isDynamic()) diff --git a/import-layers/yocto-poky/meta/lib/oe/rootfs.py b/import-layers/yocto-poky/meta/lib/oe/rootfs.py index a348b975c..f96788399 100644 --- a/import-layers/yocto-poky/meta/lib/oe/rootfs.py +++ b/import-layers/yocto-poky/meta/lib/oe/rootfs.py @@ -186,10 +186,6 @@ class Rootfs(object, metaclass=ABCMeta): shutil.copytree(postinst_intercepts_dir, intercepts_dir) - shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"), - self.deploydir + - "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt") - execute_pre_post_process(self.d, pre_process_cmds) if self.progress_reporter: @@ -477,8 +473,6 @@ class RpmRootfs(Rootfs): execute_pre_post_process(self.d, rpm_post_process_cmds) - self._log_check() - if self.inc_rpm_image_gen == "1": self.pm.backup_packaging_data() @@ -951,7 +945,9 @@ class OpkgRootfs(DpkgOpkgRootfs): if self.progress_reporter: self.progress_reporter.next_stage() - self._setup_dbg_rootfs(['/etc', '/var/lib/opkg', '/usr/lib/ssl']) + opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True) + opkg_dir = os.path.join(opkg_lib_dir, 'opkg') + self._setup_dbg_rootfs(['/etc', opkg_dir, '/usr/lib/ssl']) execute_pre_post_process(self.d, opkg_post_process_cmds) diff --git a/import-layers/yocto-poky/meta/lib/oe/terminal.py b/import-layers/yocto-poky/meta/lib/oe/terminal.py index 3901ad3f2..3c8ef59a4 100644 --- a/import-layers/yocto-poky/meta/lib/oe/terminal.py +++ b/import-layers/yocto-poky/meta/lib/oe/terminal.py @@ -227,6 +227,8 @@ def spawn(name, sh_cmd, title=None, env=None, d=None): pipe = terminal(sh_cmd, title, env, d) output = pipe.communicate()[0] + if output: + output = output.decode("utf-8") if pipe.returncode != 0: raise ExecutionError(sh_cmd, pipe.returncode, output) diff --git 
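The oe/qa.py rewrite above maps the whole binary with mmap instead of reading a fixed 4 kB prefix, replaces the old "sex" naming with explicit EI_DATA_* endianness constants, and turns ELFFile into a context manager so the mapping is closed deterministically. A minimal sketch of the same probing, reduced to reading the e_machine field (offsets follow the standard ELF header layout; error handling trimmed):

    import mmap, struct

    EI_DATA, EI_DATA_LSB, EI_DATA_MSB = 5, 1, 2
    E_MACHINE = 0x12                 # e_machine offset, same for 32- and 64-bit ELF

    def elf_machine(path):
        with open(path, "rb") as f:
            data = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
        try:
            if data[:4] != b"\x7fELF":
                raise ValueError("%s is not an ELF" % path)
            endian = "<" if data[EI_DATA] == EI_DATA_LSB else ">"
            return struct.unpack_from(endian + "H", data, E_MACHINE)[0]
        finally:
            data.close()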
a/import-layers/yocto-poky/meta/lib/oe/utils.py b/import-layers/yocto-poky/meta/lib/oe/utils.py index d6545b197..36cf74f29 100644 --- a/import-layers/yocto-poky/meta/lib/oe/utils.py +++ b/import-layers/yocto-poky/meta/lib/oe/utils.py @@ -230,6 +230,25 @@ def format_pkg_list(pkg_dict, ret_format=None): return '\n'.join(output) +def host_gcc_version(d): + import re, subprocess + + compiler = d.getVar("BUILD_CC", True) + + try: + env = os.environ.copy() + env["PATH"] = d.getVar("PATH", True) + output = subprocess.check_output("%s --version" % compiler, shell=True, env=env).decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8"))) + + match = re.match(".* (\d\.\d)\.\d.*", output.split('\n')[0]) + if not match: + bb.fatal("Can't get compiler version from %s --version output" % compiler) + + version = match.group(1) + return "-%s" % version if version in ("4.8", "4.9") else "" + # # Python 2.7 doesn't have threaded pools (just multiprocessing) # so implement a version here diff --git a/import-layers/yocto-poky/meta/lib/oeqa/controllers/testtargetloader.py b/import-layers/yocto-poky/meta/lib/oeqa/controllers/testtargetloader.py index a1b7b1d92..b51d04b21 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/controllers/testtargetloader.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/controllers/testtargetloader.py @@ -61,8 +61,6 @@ class TestTargetLoader: obj = getattr(module, target) if obj: from oeqa.targetcontrol import BaseTarget - if (not isinstance(obj, (type, types.ClassType))): - bb.warn("Target {0} found, but not of type Class".format(target)) if( not issubclass(obj, BaseTarget)): bb.warn("Target {0} found, but subclass is not BaseTarget".format(target)) except: diff --git a/import-layers/yocto-poky/meta/lib/oeqa/runtime/parselogs.py b/import-layers/yocto-poky/meta/lib/oeqa/runtime/parselogs.py index 8efe2d1de..aa5008bba 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/runtime/parselogs.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/runtime/parselogs.py @@ -43,6 +43,7 @@ common_errors = [ "controller can't do DEVSLP, turning off", "stmmac_dvr_probe: warning: cannot get CSR clock", "error: couldn\'t mount because of unsupported optional features", + "GPT: Use GNU Parted to correct GPT errors", ] video_related = [ @@ -58,6 +59,7 @@ x86_common = [ 'failed to setup card detect gpio', 'amd_nb: Cannot enumerate AMD northbridges', 'failed to retrieve link info, disabling eDP', + 'Direct firmware load for iwlwifi', ] + common_errors qemux86_common = [ @@ -69,7 +71,7 @@ qemux86_common = [ 'tsc: HPET/PMTIMER calibration failed', ] + common_errors -ignore_errors = { +ignore_errors = { 'default' : common_errors, 'qemux86' : [ 'Failed to access perfctr msr (MSR', @@ -140,6 +142,7 @@ ignore_errors = { 'Failed to load firmware i915', 'Failed to fetch GuC', 'Failed to initialize GuC', + 'Failed to load DMC firmware', 'The driver is built-in, so to load the firmware you need to', ] + x86_common, 'edgerouter' : [ @@ -200,7 +203,7 @@ class ParseLogsTest(oeRuntimeTest): hwi += "*******************************\n" return hwi - #go through the log locations provided and if it's a folder create a list with all the .log files in it, if it's a file just add + #go through the log locations provided and if it's a folder create a list with all the .log files in it, if it's a file just add #it to that list def getLogList(self, log_locations): logs = [] diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/bbtests.py 
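The host_gcc_version() helper added above keys its return value off the first line of `${BUILD_CC} --version` and only emits a suffix for the 4.8/4.9 series. A quick sketch of that regex with a made-up banner line (not output captured from a real host):

import re

first_line = "gcc (Ubuntu 4.9.4-2ubuntu1) 4.9.4"             # hypothetical banner
match = re.match(r".* (\d\.\d)\.\d.*", first_line)
version = match.group(1) if match else None
print(version)                                               # -> 4.9
print("-%s" % version if version in ("4.8", "4.9") else "")  # -> -4.9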
b/import-layers/yocto-poky/meta/lib/oeqa/selftest/bbtests.py index baae1e0e5..4ce935fc1 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/bbtests.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/bbtests.py @@ -37,7 +37,6 @@ class BitbakeTests(oeSelfTest): @testcase(103) def test_local_sstate(self): - bitbake('m4-native -ccleansstate') bitbake('m4-native') bitbake('m4-native -cclean') result = bitbake('m4-native') @@ -83,8 +82,8 @@ class BitbakeTests(oeSelfTest): pkgsplit_dir = get_bb_var('PKGDEST', test_recipe) man_dir = get_bb_var('mandir', test_recipe) - bitbake('-c cleansstate %s' % test_recipe) - bitbake(test_recipe) + bitbake('-c clean %s' % test_recipe) + bitbake('-c package -f %s' % test_recipe) self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe) man_file = os.path.join(image_dir + man_dir, 'man3/zlib.3') @@ -103,7 +102,6 @@ class BitbakeTests(oeSelfTest): # test 2 from bug 5875 test_recipe = 'zlib' - bitbake('-c cleansstate %s' % test_recipe) bitbake(test_recipe) self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe) @@ -221,7 +219,7 @@ INHERIT_remove = \"report-error\" self.track_for_cleanup(os.path.join(self.builddir, "download-selftest")) self.write_recipeinc('man',"\ndo_fail_task () {\nexit 1 \n}\n\naddtask do_fail_task before do_fetch\n" ) runCmd('bitbake -c cleanall man xcursor-transparent-theme') - result = runCmd('bitbake man xcursor-transparent-theme -k', ignore_status=True) + result = runCmd('bitbake -c unpack -k man xcursor-transparent-theme', ignore_status=True) errorpos = result.output.find('ERROR: Function failed: do_fail_task') manver = re.search("NOTE: recipe xcursor-transparent-theme-(.*?): task do_unpack: Started", result.output) continuepos = result.output.find('NOTE: recipe xcursor-transparent-theme-%s: task do_unpack: Started' % manver.group(1)) diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py index 9487898b0..47549550c 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/buildoptions.py @@ -35,9 +35,8 @@ class ImageOptionsTests(oeSelfTest): bitbake("ccache-native") self.assertTrue(os.path.isfile(os.path.join(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'), "ccache")), msg = "No ccache found under %s" % str(get_bb_var('STAGING_BINDIR_NATIVE', 'ccache-native'))) self.write_config('INHERIT += "ccache"') - bitbake("m4 -c cleansstate") - bitbake("m4 -c compile") - self.addCleanup(bitbake, 'ccache-native -ccleansstate') + self.add_command_to_tearDown('bitbake -c clean m4') + bitbake("m4 -f -c compile") res = runCmd("grep ccache %s" % (os.path.join(get_bb_var("WORKDIR","m4"),"temp/log.do_compile")), ignore_status=True) self.assertEqual(0, res.status, msg="No match for ccache in m4 log.do_compile. 
For further details: %s" % os.path.join(get_bb_var("WORKDIR","m4"),"temp/log.do_compile")) @@ -71,14 +70,14 @@ class SanityOptionsTest(oeSelfTest): @testcase(927) def test_options_warnqa_errorqa_switch(self): - bitbake("xcursor-transparent-theme -ccleansstate") self.write_config("INHERIT_remove = \"report-error\"") if "packages-list" not in get_bb_var("ERROR_QA"): self.append_config("ERROR_QA_append = \" packages-list\"") self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"') - res = bitbake("xcursor-transparent-theme", ignore_status=True) + self.add_command_to_tearDown('bitbake -c clean xcursor-transparent-theme') + res = bitbake("xcursor-transparent-theme -f -c package", ignore_status=True) self.delete_recipeinc('xcursor-transparent-theme') line = self.getline(res, "QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors.") self.assertTrue(line and line.startswith("ERROR:"), msg=res.output) @@ -86,8 +85,7 @@ class SanityOptionsTest(oeSelfTest): self.write_recipeinc('xcursor-transparent-theme', 'PACKAGES += \"${PN}-dbg\"') self.append_config('ERROR_QA_remove = "packages-list"') self.append_config('WARN_QA_append = " packages-list"') - bitbake("xcursor-transparent-theme -ccleansstate") - res = bitbake("xcursor-transparent-theme") + res = bitbake("xcursor-transparent-theme -f -c package") self.delete_recipeinc('xcursor-transparent-theme') line = self.getline(res, "QA Issue: xcursor-transparent-theme-dbg is listed in PACKAGES multiple times, this leads to packaging errors.") self.assertTrue(line and line.startswith("WARNING:"), msg=res.output) @@ -96,8 +94,8 @@ class SanityOptionsTest(oeSelfTest): def test_sanity_unsafe_script_references(self): self.write_config('WARN_QA_append = " unsafe-references-in-scripts"') - bitbake("-ccleansstate gzip") - res = bitbake("gzip") + self.add_command_to_tearDown('bitbake -c clean gzip') + res = bitbake("gzip -f -c package_qa") line = self.getline(res, "QA Issue: gzip") self.assertFalse(line, "WARNING: QA Issue: gzip message is present in bitbake's output and shouldn't be: %s" % res.output) @@ -106,29 +104,10 @@ do_install_append_pn-gzip () { echo "\n${bindir}/test" >> ${D}${bindir}/zcat } """) - res = bitbake("gzip") + res = bitbake("gzip -f -c package_qa") line = self.getline(res, "QA Issue: gzip") self.assertTrue(line and line.startswith("WARNING:"), "WARNING: QA Issue: gzip message is not present in bitbake's output: %s" % res.output) - @testcase(1434) - def test_sanity_unsafe_binary_references(self): - self.write_config('WARN_QA_append = " unsafe-references-in-binaries"') - - bitbake("-ccleansstate nfs-utils") - #res = bitbake("nfs-utils") - # FIXME when nfs-utils passes this test - #line = self.getline(res, "QA Issue: nfs-utils") - #self.assertFalse(line, "WARNING: QA Issue: nfs-utils message is present in bitbake's output and shouldn't be: %s" % res.output) - -# self.append_config(""" -#do_install_append_pn-nfs-utils () { -# echo "\n${bindir}/test" >> ${D}${base_sbindir}/osd_login -#} -#""") - res = bitbake("nfs-utils") - line = self.getline(res, "QA Issue: nfs-utils") - self.assertTrue(line and line.startswith("WARNING:"), "WARNING: QA Issue: nfs-utils message is not present in bitbake's output: %s" % res.output) - @testcase(1421) def test_layer_without_git_dir(self): """ diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/devtool.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/devtool.py index e992dcf77..302ec5d42 100644 --- 
a/import-layers/yocto-poky/meta/lib/oeqa/selftest/devtool.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/devtool.py @@ -9,7 +9,8 @@ import fnmatch import oeqa.utils.ftools as ftools from oeqa.selftest.base import oeSelfTest -from oeqa.utils.commands import runCmd, bitbake, get_bb_var, create_temp_layer, runqemu, get_test_layer +from oeqa.utils.commands import runCmd, bitbake, get_bb_var, create_temp_layer +from oeqa.utils.commands import get_bb_vars, runqemu, get_test_layer from oeqa.utils.decorators import testcase class DevtoolBase(oeSelfTest): @@ -114,6 +115,20 @@ class DevtoolBase(oeSelfTest): class DevtoolTests(DevtoolBase): + @classmethod + def setUpClass(cls): + bb_vars = get_bb_vars(['TOPDIR', 'SSTATE_DIR']) + cls.original_sstate = bb_vars['SSTATE_DIR'] + cls.devtool_sstate = os.path.join(bb_vars['TOPDIR'], 'sstate_devtool') + cls.sstate_conf = 'SSTATE_DIR = "%s"\n' % cls.devtool_sstate + cls.sstate_conf += ('SSTATE_MIRRORS += "file://.* file:///%s/PATH"\n' + % cls.original_sstate) + + @classmethod + def tearDownClass(cls): + cls.log.debug('Deleting devtool sstate cache on %s' % cls.devtool_sstate) + runCmd('rm -rf %s' % cls.devtool_sstate) + def setUp(self): """Test case setup function""" super(DevtoolTests, self).setUp() @@ -121,6 +136,7 @@ class DevtoolTests(DevtoolBase): self.assertTrue(not os.path.exists(self.workspacedir), 'This test cannot be run with a workspace directory ' 'under the build directory') + self.append_config(self.sstate_conf) def _check_src_repo(self, repo_dir): """Check srctree git repository""" diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/oescripts.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/oescripts.py index 31cd50809..28345dc6a 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/oescripts.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/oescripts.py @@ -17,12 +17,8 @@ class TestScripts(oeSelfTest): path = os.path.dirname(get_bb_var('WORKDIR', 'gzip')) old_version_recipe = os.path.join(get_bb_var('COREBASE'), 'meta/recipes-extended/gzip/gzip_1.3.12.bb') old_version = '1.3.12' - bitbake("-ccleansstate gzip") - bitbake("-ccleansstate -b %s" % old_version_recipe) - if os.path.exists(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)): - shutil.rmtree(get_bb_var('WORKDIR', "-b %s" % old_version_recipe)) - if os.path.exists(get_bb_var('WORKDIR', 'gzip')): - shutil.rmtree(get_bb_var('WORKDIR', 'gzip')) + bitbake("-c clean gzip") + bitbake("-c clean -b %s" % old_version_recipe) if os.path.exists(path): initial_contents = os.listdir(path) diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/prservice.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/prservice.py index 1b9a510fd..0b2dfe649 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/prservice.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/prservice.py @@ -37,7 +37,6 @@ class BitbakePrTests(oeSelfTest): def increment_package_pr(self, package_name): inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now() self.write_recipeinc(package_name, inc_data) - bitbake("-ccleansstate %s" % package_name) res = bitbake(package_name, ignore_status=True) self.delete_recipeinc(package_name) self.assertEqual(res.status, 0, msg=res.output) @@ -60,7 +59,6 @@ class BitbakePrTests(oeSelfTest): pr_2 = self.get_pr_version(package_name) stamp_2 = self.get_task_stamp(package_name, track_task) - bitbake("-ccleansstate %s" % package_name) 
self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1") self.assertTrue(stamp_1 != stamp_2, "Different pkg rev. but same stamp: %s" % stamp_1) @@ -86,7 +84,6 @@ class BitbakePrTests(oeSelfTest): self.increment_package_pr(package_name) pr_2 = self.get_pr_version(package_name) - bitbake("-ccleansstate %s" % package_name) self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1") @testcase(930) diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/recipetool.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/recipetool.py index db1f8deeb..9b669248f 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/recipetool.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/recipetool.py @@ -71,11 +71,6 @@ class RecipetoolTests(RecipetoolBase): logger.info('Running bitbake to generate pkgdata') bitbake('-c packagedata base-files coreutils busybox selftest-recipetool-appendfile') - @classmethod - def tearDownClass(cls): - # Shouldn't leave any traces of this artificial recipe behind - bitbake('-c cleansstate selftest-recipetool-appendfile') - def _try_recipetool_appendfile(self, testrecipe, destfile, newfile, options, expectedlines, expectedfiles): cmd = 'recipetool appendfile %s %s %s %s' % (self.templayerdir, destfile, newfile, options) return self._try_recipetool_appendcmd(cmd, testrecipe, expectedfiles, expectedlines) @@ -369,15 +364,15 @@ class RecipetoolTests(RecipetoolBase): tempsrc = os.path.join(self.tempdir, 'srctree') os.makedirs(tempsrc) recipefile = os.path.join(self.tempdir, 'logrotate_3.8.7.bb') - srcuri = 'https://fedorahosted.org/releases/l/o/logrotate/logrotate-3.8.7.tar.gz' + srcuri = 'https://github.com/logrotate/logrotate/archive/r3-8-7.tar.gz' result = runCmd('recipetool create -o %s %s -x %s' % (recipefile, srcuri, tempsrc)) self.assertTrue(os.path.isfile(recipefile)) checkvars = {} checkvars['LICENSE'] = 'GPLv2' checkvars['LIC_FILES_CHKSUM'] = 'file://COPYING;md5=18810669f13b87348459e611d31ab760' - checkvars['SRC_URI'] = 'https://fedorahosted.org/releases/l/o/logrotate/logrotate-${PV}.tar.gz' - checkvars['SRC_URI[md5sum]'] = '99e08503ef24c3e2e3ff74cc5f3be213' - checkvars['SRC_URI[sha256sum]'] = 'f6ba691f40e30e640efa2752c1f9499a3f9738257660994de70a45fe00d12b64' + checkvars['SRC_URI'] = 'https://github.com/logrotate/logrotate/archive/r3-8-7.tar.gz' + checkvars['SRC_URI[md5sum]'] = '6b1aa0e0d07eda3c9a2526520850397a' + checkvars['SRC_URI[sha256sum]'] = 'dece4bfeb9d8374a0ecafa34be139b5a697db5c926dcc69a9b8715431a22e733' self._test_recipe_contents(recipefile, checkvars, []) @testcase(1194) @@ -447,8 +442,8 @@ class RecipetoolTests(RecipetoolBase): temprecipe = os.path.join(self.tempdir, 'recipe') os.makedirs(temprecipe) recipefile = os.path.join(temprecipe, 'meson_git.bb') - srcuri = 'https://github.com/mesonbuild/meson' - result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri)) + srcuri = 'https://github.com/mesonbuild/meson;rev=0.32.0' + result = runCmd(['recipetool', 'create', '-o', temprecipe, srcuri]) self.assertTrue(os.path.isfile(recipefile)) checkvars = {} checkvars['LICENSE'] = set(['Apache-2.0']) diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/signing.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/signing.py index 4c12d6d94..606bfd3e9 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/signing.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/signing.py @@ -54,8 +54,9 @@ class Signing(oeSelfTest): self.write_config(feature) - bitbake('-c cleansstate %s' 
% test_recipe) - bitbake(test_recipe) + bitbake('-c clean %s' % test_recipe) + bitbake('-f -c package_write_rpm %s' % test_recipe) + self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe) pkgdatadir = get_bb_var('PKGDATA_DIR', test_recipe) @@ -98,7 +99,6 @@ class Signing(oeSelfTest): sstatedir = os.path.join(builddir, 'test-sstate') self.add_command_to_tearDown('bitbake -c clean %s' % test_recipe) - self.add_command_to_tearDown('bitbake -c cleansstate %s' % test_recipe) self.add_command_to_tearDown('rm -rf %s' % sstatedir) # Determine the pub key signature @@ -112,10 +112,12 @@ class Signing(oeSelfTest): feature += 'SSTATE_VERIFY_SIG ?= "1"\n' feature += 'GPG_PATH = "%s"\n' % self.gpg_dir feature += 'SSTATE_DIR = "%s"\n' % sstatedir + # Any mirror might have partial sstate without .sig files, triggering failures + feature += 'SSTATE_MIRRORS_forcevariable = ""\n' self.write_config(feature) - bitbake('-c cleansstate %s' % test_recipe) + bitbake('-c clean %s' % test_recipe) bitbake(test_recipe) recipe_sig = glob.glob(sstatedir + '/*/*:ed:*_package.tgz.sig') diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py index 6642539eb..f99d74684 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/sstatetests.py @@ -16,7 +16,7 @@ class SStateTests(SStateBase): # Test sstate files creation and their location def run_test_sstate_creation(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True, should_pass=True): - self.config_sstate(temp_sstate_location) + self.config_sstate(temp_sstate_location, [self.sstate_path]) if self.temp_sstate_location: bitbake(['-cclean'] + targets) @@ -60,7 +60,7 @@ class SStateTests(SStateBase): # Test the sstate files deletion part of the do_cleansstate task def run_test_cleansstate_task(self, targets, distro_specific=True, distro_nonspecific=True, temp_sstate_location=True): - self.config_sstate(temp_sstate_location) + self.config_sstate(temp_sstate_location, [self.sstate_path]) bitbake(['-ccleansstate'] + targets) @@ -92,7 +92,7 @@ class SStateTests(SStateBase): # Test rebuilding of distro-specific sstate files def run_test_rebuild_distro_specific_sstate(self, targets, temp_sstate_location=True): - self.config_sstate(temp_sstate_location) + self.config_sstate(temp_sstate_location, [self.sstate_path]) bitbake(['-ccleansstate'] + targets) diff --git a/import-layers/yocto-poky/meta/lib/oeqa/selftest/wic.py b/import-layers/yocto-poky/meta/lib/oeqa/selftest/wic.py index faac11e21..e652fad24 100644 --- a/import-layers/yocto-poky/meta/lib/oeqa/selftest/wic.py +++ b/import-layers/yocto-poky/meta/lib/oeqa/selftest/wic.py @@ -42,7 +42,8 @@ class Wic(oeSelfTest): def setUpLocal(self): """This code is executed before each test method.""" self.write_config('IMAGE_FSTYPES += " hddimg"\n' - 'MACHINE_FEATURES_append = " efi"\n') + 'MACHINE_FEATURES_append = " efi"\n' + 'WKS_FILE = "wic-image-minimal"\n') # Do this here instead of in setUpClass as the base setUp does some # clean up which can result in the native tools built earlier in diff --git a/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0001-Enforce-no-pie-if-the-compiler-supports-it.patch b/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0001-Enforce-no-pie-if-the-compiler-supports-it.patch new file mode 100644 index 000000000..ccdbee215 --- /dev/null +++ 
b/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0001-Enforce-no-pie-if-the-compiler-supports-it.patch @@ -0,0 +1,45 @@ +From 6186bcf1bcaaa0f16e79339e07c64c841d4d957d Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin <alex.kanavin@gmail.com> +Date: Fri, 2 Dec 2016 20:52:40 +0200 +Subject: [PATCH] Enforce -no-pie, if the compiler supports it. + +Add a -no-pie as recent (2 Dec 2016) Debian testing compiler +seems to default to enabling PIE when linking. See +https://wiki.ubuntu.com/SecurityTeam/PIE + +Upstream-Status: Pending +Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com> +--- + acinclude.m4 | 2 +- + configure.ac | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/acinclude.m4 b/acinclude.m4 +index 19200b0..a713923 100644 +--- a/acinclude.m4 ++++ b/acinclude.m4 +@@ -416,7 +416,7 @@ int main() { + + [# `$CC -c -o ...' might not be portable. But, oh, well... Is calling + # `ac_compile' like this correct, after all? +-if eval "$ac_compile -S -o conftest.s" 2> /dev/null; then] ++if eval "$ac_compile -S -o conftest.s" 2> /dev/null && eval "$CC -dumpspecs 2>/dev/null | grep -e no-pie" ; then] + AC_MSG_RESULT([yes]) + [# Should we clear up other files as well, having called `AC_LANG_CONFTEST'? + rm -f conftest.s +diff --git a/configure.ac b/configure.ac +index df20991..506c6b4 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -603,7 +603,7 @@ grub_CHECK_PIE + [# Need that, because some distributions ship compilers that include + # `-fPIE' in the default specs. + if [ x"$pie_possible" = xyes ]; then +- TARGET_CFLAGS="$TARGET_CFLAGS -fno-PIE" ++ TARGET_CFLAGS="$TARGET_CFLAGS -fno-PIE -no-pie" + fi] + + # Position independent executable. +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0001-grub-core-kern-efi-mm.c-grub_efi_finish_boot_service.patch b/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0001-grub-core-kern-efi-mm.c-grub_efi_finish_boot_service.patch new file mode 100644 index 000000000..abf08e16c --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0001-grub-core-kern-efi-mm.c-grub_efi_finish_boot_service.patch @@ -0,0 +1,79 @@ +From b258761d11946b28a847dff0768c3f271e13d60a Mon Sep 17 00:00:00 2001 +From: Awais Belal <awais_belal@mentor.com> +Date: Thu, 8 Dec 2016 18:21:12 +0500 +Subject: [PATCH 1/2] * grub-core/kern/efi/mm.c + (grub_efi_finish_boot_services): Try terminating EFI services several times + due to quirks in some implementations. 
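In plain terms, the hunk below wraps the memory-map fetch and exit_boot_services() call in a loop and only gives up on an error other than EFI_INVALID_PARAMETER. A rough control-flow sketch in Python pseudo-code (function names are placeholders, not grub's or the firmware's API):

# Not grub code: get_memory_map/exit_boot_services stand in for the EFI
# boot services used by grub_efi_finish_boot_services().
def finish_boot_services(get_memory_map, exit_boot_services):
    while True:
        mmap_buf, key = get_memory_map()
        status = exit_boot_services(key)
        if status == "SUCCESS":
            return mmap_buf
        if status != "INVALID_PARAMETER":
            raise IOError("couldn't terminate EFI services")
        # INVALID_PARAMETER: the map key went stale (quirky firmware),
        # so fetch a fresh map and try again.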
+ +Upstream-status: Backport [ http://git.savannah.gnu.org/cgit/grub.git/patch/?id=e75fdee420a7ad95e9a465c9699adc2e2e970440 ] + +Signed-off-by: Awais Belal <awais_belal@mentor.com> +--- + grub-core/kern/efi/mm.c | 46 ++++++++++++++++++++++++++++++---------------- + 1 file changed, 30 insertions(+), 16 deletions(-) + +diff --git a/grub-core/kern/efi/mm.c b/grub-core/kern/efi/mm.c +index 461deb0..b00e0bc 100644 +--- a/grub-core/kern/efi/mm.c ++++ b/grub-core/kern/efi/mm.c +@@ -167,27 +167,41 @@ grub_efi_finish_boot_services (grub_efi_uintn_t *outbuf_size, void *outbuf, + apple, sizeof (apple)) == 0); + #endif + +- if (grub_efi_get_memory_map (&finish_mmap_size, finish_mmap_buf, &finish_key, +- &finish_desc_size, &finish_desc_version) < 0) +- return grub_error (GRUB_ERR_IO, "couldn't retrieve memory map"); ++ while (1) ++ { ++ if (grub_efi_get_memory_map (&finish_mmap_size, finish_mmap_buf, &finish_key, ++ &finish_desc_size, &finish_desc_version) < 0) ++ return grub_error (GRUB_ERR_IO, "couldn't retrieve memory map"); + +- if (outbuf && *outbuf_size < finish_mmap_size) +- return grub_error (GRUB_ERR_IO, "memory map buffer is too small"); ++ if (outbuf && *outbuf_size < finish_mmap_size) ++ return grub_error (GRUB_ERR_IO, "memory map buffer is too small"); + +- finish_mmap_buf = grub_malloc (finish_mmap_size); +- if (!finish_mmap_buf) +- return grub_errno; ++ finish_mmap_buf = grub_malloc (finish_mmap_size); ++ if (!finish_mmap_buf) ++ return grub_errno; + +- if (grub_efi_get_memory_map (&finish_mmap_size, finish_mmap_buf, &finish_key, +- &finish_desc_size, &finish_desc_version) <= 0) +- return grub_error (GRUB_ERR_IO, "couldn't retrieve memory map"); ++ if (grub_efi_get_memory_map (&finish_mmap_size, finish_mmap_buf, &finish_key, ++ &finish_desc_size, &finish_desc_version) <= 0) ++ { ++ grub_free (finish_mmap_buf); ++ return grub_error (GRUB_ERR_IO, "couldn't retrieve memory map"); ++ } + +- b = grub_efi_system_table->boot_services; +- status = efi_call_2 (b->exit_boot_services, grub_efi_image_handle, +- finish_key); +- if (status != GRUB_EFI_SUCCESS) +- return grub_error (GRUB_ERR_IO, "couldn't terminate EFI services"); ++ b = grub_efi_system_table->boot_services; ++ status = efi_call_2 (b->exit_boot_services, grub_efi_image_handle, ++ finish_key); ++ if (status == GRUB_EFI_SUCCESS) ++ break; + ++ if (status != GRUB_EFI_INVALID_PARAMETER) ++ { ++ grub_free (finish_mmap_buf); ++ return grub_error (GRUB_ERR_IO, "couldn't terminate EFI services"); ++ } ++ ++ grub_free (finish_mmap_buf); ++ grub_printf ("Trying to terminate EFI services again\n"); ++ } + grub_efi_is_finished = 1; + if (outbuf_size) + *outbuf_size = finish_mmap_size; +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0002-grub-core-kern-efi-mm.c-grub_efi_get_memory_map-Neve.patch b/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0002-grub-core-kern-efi-mm.c-grub_efi_get_memory_map-Neve.patch new file mode 100644 index 000000000..0e735ffcd --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-bsp/grub/files/0002-grub-core-kern-efi-mm.c-grub_efi_get_memory_map-Neve.patch @@ -0,0 +1,43 @@ +From 630de45f3d5f9a2dda7fad99acd21449b8c4111d Mon Sep 17 00:00:00 2001 +From: Awais Belal <awais_belal@mentor.com> +Date: Thu, 8 Dec 2016 18:27:01 +0500 +Subject: [PATCH 2/2] * grub-core/kern/efi/mm.c (grub_efi_get_memory_map): + Never return a descriptor_size==0 to avoid potential divisions by zero. 
+ +Upstream-status: Backport [ http://git.savannah.gnu.org/cgit/grub.git/commit/?id=69aee43fa64601cabf6efa9279c10d69b466662e ] + +Signed-off-by: Awais Belal <awais_belal@mentor.com> +--- + grub-core/kern/efi/mm.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/grub-core/kern/efi/mm.c b/grub-core/kern/efi/mm.c +index b00e0bc..9f1d194 100644 +--- a/grub-core/kern/efi/mm.c ++++ b/grub-core/kern/efi/mm.c +@@ -235,6 +235,7 @@ grub_efi_get_memory_map (grub_efi_uintn_t *memory_map_size, + grub_efi_boot_services_t *b; + grub_efi_uintn_t key; + grub_efi_uint32_t version; ++ grub_efi_uintn_t size; + + if (grub_efi_is_finished) + { +@@ -264,10 +265,14 @@ grub_efi_get_memory_map (grub_efi_uintn_t *memory_map_size, + map_key = &key; + if (! descriptor_version) + descriptor_version = &version; ++ if (! descriptor_size) ++ descriptor_size = &size; + + b = grub_efi_system_table->boot_services; + status = efi_call_5 (b->get_memory_map, memory_map_size, memory_map, map_key, + descriptor_size, descriptor_version); ++ if (*descriptor_size == 0) ++ *descriptor_size = sizeof (grub_efi_memory_descriptor_t); + if (status == GRUB_EFI_SUCCESS) + return 1; + else if (status == GRUB_EFI_BUFFER_TOO_SMALL) +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-bsp/grub/grub2.inc b/import-layers/yocto-poky/meta/recipes-bsp/grub/grub2.inc index b10f633aa..b69de9f34 100644 --- a/import-layers/yocto-poky/meta/recipes-bsp/grub/grub2.inc +++ b/import-layers/yocto-poky/meta/recipes-bsp/grub/grub2.inc @@ -32,6 +32,9 @@ SRC_URI = "ftp://ftp.gnu.org/gnu/grub/grub-${PV}.tar.gz \ file://0001-Remove-direct-_llseek-code-and-require-long-filesyst.patch \ file://fix-texinfo.patch \ file://0001-grub-core-gettext-gettext.c-main_context-secondary_c.patch \ + file://0001-Enforce-no-pie-if-the-compiler-supports-it.patch \ + file://0001-grub-core-kern-efi-mm.c-grub_efi_finish_boot_service.patch \ + file://0002-grub-core-kern-efi-mm.c-grub_efi_get_memory_map-Neve.patch \ " DEPENDS = "flex-native bison-native autogen-native" diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2016-6170.patch b/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2016-6170.patch new file mode 100644 index 000000000..75bc211cb --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2016-6170.patch @@ -0,0 +1,1090 @@ +From 1bbcfe2fc84f57b1e4e075fb3bc2a1dd0a3a851f Mon Sep 17 00:00:00 2001 +From: Mark Andrews <marka@isc.org> +Date: Wed, 2 Nov 2016 17:31:27 +1100 +Subject: [PATCH] 4504. [security] Allow the maximum number of records in a + zone to be specified. This provides a control for issues raised in + CVE-2016-6170. 
[RT #42143] + +(cherry picked from commit 5f8412a4cb5ee14a0e8cddd4107854b40ee3291e) + +Upstream-Status: Backport +[https://source.isc.org/cgi-bin/gitweb.cgi?p=bind9.git;a=commit;h=1bbcfe2fc84f57b1e4e075fb3bc2a1dd0a3a851f] + +CVE: CVE-2016-6170 + +Signed-off-by: Yi Zhao <yi.zhao@windriver.com> +--- + CHANGES | 4 + + bin/named/config.c | 1 + + bin/named/named.conf.docbook | 3 + + bin/named/update.c | 16 +++ + bin/named/zoneconf.c | 7 ++ + bin/tests/system/nsupdate/clean.sh | 1 + + bin/tests/system/nsupdate/ns3/named.conf | 7 ++ + bin/tests/system/nsupdate/ns3/too-big.test.db.in | 10 ++ + bin/tests/system/nsupdate/setup.sh | 2 + + bin/tests/system/nsupdate/tests.sh | 15 +++ + bin/tests/system/xfer/clean.sh | 1 + + bin/tests/system/xfer/ns1/axfr-too-big.db | 10 ++ + bin/tests/system/xfer/ns1/ixfr-too-big.db.in | 13 +++ + bin/tests/system/xfer/ns1/named.conf | 11 ++ + bin/tests/system/xfer/ns6/named.conf | 14 +++ + bin/tests/system/xfer/setup.sh | 2 + + bin/tests/system/xfer/tests.sh | 26 +++++ + doc/arm/Bv9ARM-book.xml | 21 ++++ + doc/arm/notes.xml | 9 ++ + lib/bind9/check.c | 2 + + lib/dns/db.c | 13 +++ + lib/dns/ecdb.c | 3 +- + lib/dns/include/dns/db.h | 20 ++++ + lib/dns/include/dns/rdataslab.h | 13 +++ + lib/dns/include/dns/result.h | 6 +- + lib/dns/include/dns/zone.h | 28 ++++- + lib/dns/rbtdb.c | 127 +++++++++++++++++++++-- + lib/dns/rdataslab.c | 13 +++ + lib/dns/result.c | 9 +- + lib/dns/sdb.c | 3 +- + lib/dns/sdlz.c | 3 +- + lib/dns/xfrin.c | 22 +++- + lib/dns/zone.c | 23 +++- + lib/isccfg/namedconf.c | 1 + + 34 files changed, 444 insertions(+), 15 deletions(-) + create mode 100644 bin/tests/system/nsupdate/ns3/too-big.test.db.in + create mode 100644 bin/tests/system/xfer/ns1/axfr-too-big.db + create mode 100644 bin/tests/system/xfer/ns1/ixfr-too-big.db.in + +diff --git a/CHANGES b/CHANGES +index 41cfce5..97d2e60 100644 +--- a/CHANGES ++++ b/CHANGES +@@ -1,3 +1,7 @@ ++4504. [security] Allow the maximum number of records in a zone to ++ be specified. This provides a control for issues ++ raised in CVE-2016-6170. [RT #42143] ++ + 4489. [security] It was possible to trigger assertions when processing + a response. 
(CVE-2016-8864) [RT #43465] + +diff --git a/bin/named/config.c b/bin/named/config.c +index f06348c..c24e334 100644 +--- a/bin/named/config.c ++++ b/bin/named/config.c +@@ -209,6 +209,7 @@ options {\n\ + max-transfer-time-out 120;\n\ + max-transfer-idle-in 60;\n\ + max-transfer-idle-out 60;\n\ ++ max-records 0;\n\ + max-retry-time 1209600; /* 2 weeks */\n\ + min-retry-time 500;\n\ + max-refresh-time 2419200; /* 4 weeks */\n\ +diff --git a/bin/named/named.conf.docbook b/bin/named/named.conf.docbook +index 4c99a61..c2d173a 100644 +--- a/bin/named/named.conf.docbook ++++ b/bin/named/named.conf.docbook +@@ -338,6 +338,7 @@ options { + }; + + max-journal-size <replaceable>size_no_default</replaceable>; ++ max-records <replaceable>integer</replaceable>; + max-transfer-time-in <replaceable>integer</replaceable>; + max-transfer-time-out <replaceable>integer</replaceable>; + max-transfer-idle-in <replaceable>integer</replaceable>; +@@ -527,6 +528,7 @@ view <replaceable>string</replaceable> <replaceable>optional_class</replaceable> + }; + + max-journal-size <replaceable>size_no_default</replaceable>; ++ max-records <replaceable>integer</replaceable>; + max-transfer-time-in <replaceable>integer</replaceable>; + max-transfer-time-out <replaceable>integer</replaceable>; + max-transfer-idle-in <replaceable>integer</replaceable>; +@@ -624,6 +626,7 @@ zone <replaceable>string</replaceable> <replaceable>optional_class</replaceable> + }; + + max-journal-size <replaceable>size_no_default</replaceable>; ++ max-records <replaceable>integer</replaceable>; + max-transfer-time-in <replaceable>integer</replaceable>; + max-transfer-time-out <replaceable>integer</replaceable>; + max-transfer-idle-in <replaceable>integer</replaceable>; +diff --git a/bin/named/update.c b/bin/named/update.c +index 83b1a05..cc2a611 100644 +--- a/bin/named/update.c ++++ b/bin/named/update.c +@@ -2455,6 +2455,8 @@ update_action(isc_task_t *task, isc_event_t *event) { + isc_boolean_t had_dnskey; + dns_rdatatype_t privatetype = dns_zone_getprivatetype(zone); + dns_ttl_t maxttl = 0; ++ isc_uint32_t maxrecords; ++ isc_uint64_t records; + + INSIST(event->ev_type == DNS_EVENT_UPDATE); + +@@ -3138,6 +3140,20 @@ update_action(isc_task_t *task, isc_event_t *event) { + } + } + ++ maxrecords = dns_zone_getmaxrecords(zone); ++ if (maxrecords != 0U) { ++ result = dns_db_getsize(db, ver, &records, NULL); ++ if (result == ISC_R_SUCCESS && records > maxrecords) { ++ update_log(client, zone, ISC_LOG_ERROR, ++ "records in zone (%" ++ ISC_PRINT_QUADFORMAT ++ "u) exceeds max-records (%u)", ++ records, maxrecords); ++ result = DNS_R_TOOMANYRECORDS; ++ goto failure; ++ } ++ } ++ + journalfile = dns_zone_getjournal(zone); + if (journalfile != NULL) { + update_log(client, zone, LOGLEVEL_DEBUG, +diff --git a/bin/named/zoneconf.c b/bin/named/zoneconf.c +index 4ee3dfe..14dd8ce 100644 +--- a/bin/named/zoneconf.c ++++ b/bin/named/zoneconf.c +@@ -978,6 +978,13 @@ ns_zone_configure(const cfg_obj_t *config, const cfg_obj_t *vconfig, + dns_zone_setmaxttl(raw, maxttl); + } + ++ obj = NULL; ++ result = ns_config_get(maps, "max-records", &obj); ++ INSIST(result == ISC_R_SUCCESS && obj != NULL); ++ dns_zone_setmaxrecords(mayberaw, cfg_obj_asuint32(obj)); ++ if (zone != mayberaw) ++ dns_zone_setmaxrecords(zone, 0); ++ + if (raw != NULL && filename != NULL) { + #define SIGNED ".signed" + size_t signedlen = strlen(filename) + sizeof(SIGNED); +diff --git a/bin/tests/system/nsupdate/clean.sh b/bin/tests/system/nsupdate/clean.sh +index aaefc02..ea25545 100644 +--- 
a/bin/tests/system/nsupdate/clean.sh ++++ b/bin/tests/system/nsupdate/clean.sh +@@ -32,6 +32,7 @@ rm -f ns3/example.db.jnl ns3/example.db + rm -f ns3/nsec3param.test.db.signed.jnl ns3/nsec3param.test.db ns3/nsec3param.test.db.signed ns3/dsset-nsec3param.test. + rm -f ns3/dnskey.test.db.signed.jnl ns3/dnskey.test.db ns3/dnskey.test.db.signed ns3/dsset-dnskey.test. + rm -f ns3/K* ++rm -f ns3/too-big.test.db + rm -f dig.out.* + rm -f jp.out.ns3.* + rm -f Kxxx.* +diff --git a/bin/tests/system/nsupdate/ns3/named.conf b/bin/tests/system/nsupdate/ns3/named.conf +index 2abd522..68ff27a 100644 +--- a/bin/tests/system/nsupdate/ns3/named.conf ++++ b/bin/tests/system/nsupdate/ns3/named.conf +@@ -60,3 +60,10 @@ zone "dnskey.test" { + allow-update { any; }; + file "dnskey.test.db.signed"; + }; ++ ++zone "too-big.test" { ++ type master; ++ allow-update { any; }; ++ max-records 3; ++ file "too-big.test.db"; ++}; +diff --git a/bin/tests/system/nsupdate/ns3/too-big.test.db.in b/bin/tests/system/nsupdate/ns3/too-big.test.db.in +new file mode 100644 +index 0000000..7ff1e4a +--- /dev/null ++++ b/bin/tests/system/nsupdate/ns3/too-big.test.db.in +@@ -0,0 +1,10 @@ ++; Copyright (C) 2016 Internet Systems Consortium, Inc. ("ISC") ++; ++; This Source Code Form is subject to the terms of the Mozilla Public ++; License, v. 2.0. If a copy of the MPL was not distributed with this ++; file, You can obtain one at http://mozilla.org/MPL/2.0/. ++ ++$TTL 10 ++too-big.test. IN SOA too-big.test. hostmaster.too-big.test. 1 3600 900 2419200 3600 ++too-big.test. IN NS too-big.test. ++too-big.test. IN A 10.53.0.3 +diff --git a/bin/tests/system/nsupdate/setup.sh b/bin/tests/system/nsupdate/setup.sh +index 828255e..43c4094 100644 +--- a/bin/tests/system/nsupdate/setup.sh ++++ b/bin/tests/system/nsupdate/setup.sh +@@ -27,12 +27,14 @@ test -r $RANDFILE || $GENRANDOM 400 $RANDFILE + rm -f ns1/*.jnl ns1/example.db ns2/*.jnl ns2/example.bk + rm -f ns2/update.bk ns2/update.alt.bk + rm -f ns3/example.db.jnl ++rm -f ns3/too-big.test.db.jnl + + cp -f ns1/example1.db ns1/example.db + sed 's/example.nil/other.nil/g' ns1/example1.db > ns1/other.db + sed 's/example.nil/unixtime.nil/g' ns1/example1.db > ns1/unixtime.db + sed 's/example.nil/keytests.nil/g' ns1/example1.db > ns1/keytests.db + cp -f ns3/example.db.in ns3/example.db ++cp -f ns3/too-big.test.db.in ns3/too-big.test.db + + # update_test.pl has its own zone file because it + # requires a specific NS record set. +diff --git a/bin/tests/system/nsupdate/tests.sh b/bin/tests/system/nsupdate/tests.sh +index 78d501e..0a6bbd3 100755 +--- a/bin/tests/system/nsupdate/tests.sh ++++ b/bin/tests/system/nsupdate/tests.sh +@@ -581,5 +581,20 @@ if [ $ret -ne 0 ]; then + status=1 + fi + ++n=`expr $n + 1` ++echo "I:check that adding too many records is blocked ($n)" ++ret=0 ++$NSUPDATE -v << EOF > nsupdate.out-$n 2>&1 && ret=1 ++server 10.53.0.3 5300 ++zone too-big.test. 
++update add r1.too-big.test 3600 IN TXT r1.too-big.test ++send ++EOF ++grep "update failed: SERVFAIL" nsupdate.out-$n > /dev/null || ret=1 ++DIG +tcp @10.53.0.3 -p 5300 r1.too-big.test TXT > dig.out.ns3.test$n ++grep "status: NXDOMAIN" dig.out.ns3.test$n > /dev/null || ret=1 ++grep "records in zone (4) exceeds max-records (3)" ns3/named.run > /dev/null || ret=1 ++[ $ret = 0 ] || { echo I:failed; status=1; } ++ + echo "I:exit status: $status" + exit $status +diff --git a/bin/tests/system/xfer/clean.sh b/bin/tests/system/xfer/clean.sh +index 48aa159..da62a33 100644 +--- a/bin/tests/system/xfer/clean.sh ++++ b/bin/tests/system/xfer/clean.sh +@@ -36,3 +36,4 @@ rm -f ns7/*.db ns7/*.bk ns7/*.jnl + rm -f */named.memstats + rm -f */named.run + rm -f */ans.run ++rm -f ns1/ixfr-too-big.db ns1/ixfr-too-big.db.jnl +diff --git a/bin/tests/system/xfer/ns1/axfr-too-big.db b/bin/tests/system/xfer/ns1/axfr-too-big.db +new file mode 100644 +index 0000000..d43760d +--- /dev/null ++++ b/bin/tests/system/xfer/ns1/axfr-too-big.db +@@ -0,0 +1,10 @@ ++; Copyright (C) 2016 Internet Systems Consortium, Inc. ("ISC") ++; ++; This Source Code Form is subject to the terms of the Mozilla Public ++; License, v. 2.0. If a copy of the MPL was not distributed with this ++; file, You can obtain one at http://mozilla.org/MPL/2.0/. ++ ++$TTL 3600 ++@ IN SOA . . 0 0 0 0 0 ++@ IN NS . ++$GENERATE 1-29 host$ A 1.2.3.$ +diff --git a/bin/tests/system/xfer/ns1/ixfr-too-big.db.in b/bin/tests/system/xfer/ns1/ixfr-too-big.db.in +new file mode 100644 +index 0000000..318bb77 +--- /dev/null ++++ b/bin/tests/system/xfer/ns1/ixfr-too-big.db.in +@@ -0,0 +1,13 @@ ++; Copyright (C) 2016 Internet Systems Consortium, Inc. ("ISC") ++; ++; This Source Code Form is subject to the terms of the Mozilla Public ++; License, v. 2.0. If a copy of the MPL was not distributed with this ++; file, You can obtain one at http://mozilla.org/MPL/2.0/. ++ ++$TTL 3600 ++@ IN SOA . . 
0 0 0 0 0 ++@ IN NS ns1 ++@ IN NS ns6 ++ns1 IN A 10.53.0.1 ++ns6 IN A 10.53.0.6 ++$GENERATE 1-25 host$ A 1.2.3.$ +diff --git a/bin/tests/system/xfer/ns1/named.conf b/bin/tests/system/xfer/ns1/named.conf +index 07dad85..1d29292 100644 +--- a/bin/tests/system/xfer/ns1/named.conf ++++ b/bin/tests/system/xfer/ns1/named.conf +@@ -44,3 +44,14 @@ zone "slave" { + type master; + file "slave.db"; + }; ++ ++zone "axfr-too-big" { ++ type master; ++ file "axfr-too-big.db"; ++}; ++ ++zone "ixfr-too-big" { ++ type master; ++ allow-update { any; }; ++ file "ixfr-too-big.db"; ++}; +diff --git a/bin/tests/system/xfer/ns6/named.conf b/bin/tests/system/xfer/ns6/named.conf +index c9421b1..a12a92c 100644 +--- a/bin/tests/system/xfer/ns6/named.conf ++++ b/bin/tests/system/xfer/ns6/named.conf +@@ -52,3 +52,17 @@ zone "slave" { + masters { 10.53.0.1; }; + file "slave.bk"; + }; ++ ++zone "axfr-too-big" { ++ type slave; ++ max-records 30; ++ masters { 10.53.0.1; }; ++ file "axfr-too-big.bk"; ++}; ++ ++zone "ixfr-too-big" { ++ type slave; ++ max-records 30; ++ masters { 10.53.0.1; }; ++ file "ixfr-too-big.bk"; ++}; +diff --git a/bin/tests/system/xfer/setup.sh b/bin/tests/system/xfer/setup.sh +index 56ca901..c55abf8 100644 +--- a/bin/tests/system/xfer/setup.sh ++++ b/bin/tests/system/xfer/setup.sh +@@ -33,3 +33,5 @@ cp -f ns4/named.conf.base ns4/named.conf + + cp ns2/slave.db.in ns2/slave.db + touch -t 200101010000 ns2/slave.db ++ ++cp -f ns1/ixfr-too-big.db.in ns1/ixfr-too-big.db +diff --git a/bin/tests/system/xfer/tests.sh b/bin/tests/system/xfer/tests.sh +index 67b2a1a..fe33f0a 100644 +--- a/bin/tests/system/xfer/tests.sh ++++ b/bin/tests/system/xfer/tests.sh +@@ -368,5 +368,31 @@ $DIGCMD nil. TXT | grep 'incorrect key AXFR' >/dev/null && { + status=1 + } + ++n=`expr $n + 1` ++echo "I:test that a zone with too many records is rejected (AXFR) ($n)" ++tmp=0 ++grep "'axfr-too-big/IN'.*: too many records" ns6/named.run >/dev/null || tmp=1 ++if test $tmp != 0 ; then echo "I:failed"; fi ++status=`expr $status + $tmp` ++ ++n=`expr $n + 1` ++echo "I:test that a zone with too many records is rejected (IXFR) ($n)" ++tmp=0 ++grep "'ixfr-too-big./IN.*: too many records" ns6/named.run >/dev/null && tmp=1 ++$NSUPDATE << EOF ++zone ixfr-too-big ++server 10.53.0.1 5300 ++update add the-31st-record.ixfr-too-big 0 TXT this is it ++send ++EOF ++for i in 1 2 3 4 5 6 7 8 ++do ++ grep "'ixfr-too-big/IN'.*: too many records" ns6/named.run >/dev/null && break ++ sleep 1 ++done ++grep "'ixfr-too-big/IN'.*: too many records" ns6/named.run >/dev/null || tmp=1 ++if test $tmp != 0 ; then echo "I:failed"; fi ++status=`expr $status + $tmp` ++ + echo "I:exit status: $status" + exit $status +diff --git a/doc/arm/Bv9ARM-book.xml b/doc/arm/Bv9ARM-book.xml +index 848b582..0369505 100644 +--- a/doc/arm/Bv9ARM-book.xml ++++ b/doc/arm/Bv9ARM-book.xml +@@ -4858,6 +4858,7 @@ badresp:1,adberr:0,findfail:0,valfail:0] + <optional> use-queryport-pool <replaceable>yes_or_no</replaceable>; </optional> + <optional> queryport-pool-ports <replaceable>number</replaceable>; </optional> + <optional> queryport-pool-updateinterval <replaceable>number</replaceable>; </optional> ++ <optional> max-records <replaceable>number</replaceable>; </optional> + <optional> max-transfer-time-in <replaceable>number</replaceable>; </optional> + <optional> max-transfer-time-out <replaceable>number</replaceable>; </optional> + <optional> max-transfer-idle-in <replaceable>number</replaceable>; </optional> +@@ -8164,6 +8165,16 @@ avoid-v6-udp-ports { 40000; range 50000 60000; }; + 
</varlistentry> + + <varlistentry> ++ <term><command>max-records</command></term> ++ <listitem> ++ <para> ++ The maximum number of records permitted in a zone. ++ The default is zero which means unlimited. ++ </para> ++ </listitem> ++ </varlistentry> ++ ++ <varlistentry> + <term><command>host-statistics-max</command></term> + <listitem> + <para> +@@ -12056,6 +12067,16 @@ zone <replaceable>zone_name</replaceable> <optional><replaceable>class</replacea + </varlistentry> + + <varlistentry> ++ <term><command>max-records</command></term> ++ <listitem> ++ <para> ++ See the description of ++ <command>max-records</command> in <xref linkend="server_resource_limits"/>. ++ </para> ++ </listitem> ++ </varlistentry> ++ ++ <varlistentry> + <term><command>max-transfer-time-in</command></term> + <listitem> + <para> +diff --git a/doc/arm/notes.xml b/doc/arm/notes.xml +index 095eb5b..36495e7 100644 +--- a/doc/arm/notes.xml ++++ b/doc/arm/notes.xml +@@ -52,6 +52,15 @@ + <itemizedlist> + <listitem> + <para> ++ Added the ability to specify the maximum number of records ++ permitted in a zone (max-records #;). This provides a mechanism ++ to block overly large zone transfers, which is a potential risk ++ with slave zones from other parties, as described in CVE-2016-6170. ++ [RT #42143] ++ </para> ++ </listitem> ++ <listitem> ++ <para> + Duplicate EDNS COOKIE options in a response could trigger + an assertion failure. This flaw is disclosed in CVE-2016-2088. + [RT #41809] +diff --git a/lib/bind9/check.c b/lib/bind9/check.c +index b8c05dd..edb7534 100644 +--- a/lib/bind9/check.c ++++ b/lib/bind9/check.c +@@ -1510,6 +1510,8 @@ check_zoneconf(const cfg_obj_t *zconfig, const cfg_obj_t *voptions, + REDIRECTZONE }, + { "masters", SLAVEZONE | STUBZONE | REDIRECTZONE }, + { "max-ixfr-log-size", MASTERZONE | SLAVEZONE | STREDIRECTZONE }, ++ { "max-records", MASTERZONE | SLAVEZONE | STUBZONE | STREDIRECTZONE | ++ STATICSTUBZONE | REDIRECTZONE }, + { "max-refresh-time", SLAVEZONE | STUBZONE | STREDIRECTZONE }, + { "max-retry-time", SLAVEZONE | STUBZONE | STREDIRECTZONE }, + { "max-transfer-idle-in", SLAVEZONE | STUBZONE | STREDIRECTZONE }, +diff --git a/lib/dns/db.c b/lib/dns/db.c +index 7e4f357..ced94a5 100644 +--- a/lib/dns/db.c ++++ b/lib/dns/db.c +@@ -999,6 +999,19 @@ dns_db_getnsec3parameters(dns_db_t *db, dns_dbversion_t *version, + } + + isc_result_t ++dns_db_getsize(dns_db_t *db, dns_dbversion_t *version, isc_uint64_t *records, ++ isc_uint64_t *bytes) ++{ ++ REQUIRE(DNS_DB_VALID(db)); ++ REQUIRE(dns_db_iszone(db) == ISC_TRUE); ++ ++ if (db->methods->getsize != NULL) ++ return ((db->methods->getsize)(db, version, records, bytes)); ++ ++ return (ISC_R_NOTFOUND); ++} ++ ++isc_result_t + dns_db_setsigningtime(dns_db_t *db, dns_rdataset_t *rdataset, + isc_stdtime_t resign) + { +diff --git a/lib/dns/ecdb.c b/lib/dns/ecdb.c +index 553a339..b5d04d2 100644 +--- a/lib/dns/ecdb.c ++++ b/lib/dns/ecdb.c +@@ -587,7 +587,8 @@ static dns_dbmethods_t ecdb_methods = { + NULL, /* findnodeext */ + NULL, /* findext */ + NULL, /* setcachestats */ +- NULL /* hashsize */ ++ NULL, /* hashsize */ ++ NULL /* getsize */ + }; + + static isc_result_t +diff --git a/lib/dns/include/dns/db.h b/lib/dns/include/dns/db.h +index a4a4482..aff42d6 100644 +--- a/lib/dns/include/dns/db.h ++++ b/lib/dns/include/dns/db.h +@@ -195,6 +195,8 @@ typedef struct dns_dbmethods { + dns_rdataset_t *sigrdataset); + isc_result_t (*setcachestats)(dns_db_t *db, isc_stats_t *stats); + unsigned int (*hashsize)(dns_db_t *db); ++ isc_result_t (*getsize)(dns_db_t *db, 
dns_dbversion_t *version, ++ isc_uint64_t *records, isc_uint64_t *bytes); + } dns_dbmethods_t; + + typedef isc_result_t +@@ -1485,6 +1487,24 @@ dns_db_getnsec3parameters(dns_db_t *db, dns_dbversion_t *version, + */ + + isc_result_t ++dns_db_getsize(dns_db_t *db, dns_dbversion_t *version, isc_uint64_t *records, ++ isc_uint64_t *bytes); ++/*%< ++ * Get the number of records in the given version of the database as well ++ * as the number bytes used to store those records. ++ * ++ * Requires: ++ * \li 'db' is a valid zone database. ++ * \li 'version' is NULL or a valid version. ++ * \li 'records' is NULL or a pointer to return the record count in. ++ * \li 'bytes' is NULL or a pointer to return the byte count in. ++ * ++ * Returns: ++ * \li #ISC_R_SUCCESS ++ * \li #ISC_R_NOTIMPLEMENTED ++ */ ++ ++isc_result_t + dns_db_findnsec3node(dns_db_t *db, dns_name_t *name, + isc_boolean_t create, dns_dbnode_t **nodep); + /*%< +diff --git a/lib/dns/include/dns/rdataslab.h b/lib/dns/include/dns/rdataslab.h +index 3ac44b8..2e1e759 100644 +--- a/lib/dns/include/dns/rdataslab.h ++++ b/lib/dns/include/dns/rdataslab.h +@@ -104,6 +104,7 @@ dns_rdataslab_tordataset(unsigned char *slab, unsigned int reservelen, + * Ensures: + *\li 'rdataset' is associated and points to a valid rdataest. + */ ++ + unsigned int + dns_rdataslab_size(unsigned char *slab, unsigned int reservelen); + /*%< +@@ -116,6 +117,18 @@ dns_rdataslab_size(unsigned char *slab, unsigned int reservelen); + *\li The number of bytes in the slab, including the reservelen. + */ + ++unsigned int ++dns_rdataslab_count(unsigned char *slab, unsigned int reservelen); ++/*%< ++ * Return the number of records in the rdataslab ++ * ++ * Requires: ++ *\li 'slab' points to a slab. ++ * ++ * Returns: ++ *\li The number of records in the slab. ++ */ ++ + isc_result_t + dns_rdataslab_merge(unsigned char *oslab, unsigned char *nslab, + unsigned int reservelen, isc_mem_t *mctx, +diff --git a/lib/dns/include/dns/result.h b/lib/dns/include/dns/result.h +index 7d11c2b..93d1fd5 100644 +--- a/lib/dns/include/dns/result.h ++++ b/lib/dns/include/dns/result.h +@@ -157,8 +157,12 @@ + #define DNS_R_BADCDS (ISC_RESULTCLASS_DNS + 111) + #define DNS_R_BADCDNSKEY (ISC_RESULTCLASS_DNS + 112) + #define DNS_R_OPTERR (ISC_RESULTCLASS_DNS + 113) ++#define DNS_R_BADDNSTAP (ISC_RESULTCLASS_DNS + 114) ++#define DNS_R_BADTSIG (ISC_RESULTCLASS_DNS + 115) ++#define DNS_R_BADSIG0 (ISC_RESULTCLASS_DNS + 116) ++#define DNS_R_TOOMANYRECORDS (ISC_RESULTCLASS_DNS + 117) + +-#define DNS_R_NRESULTS 114 /*%< Number of results */ ++#define DNS_R_NRESULTS 118 /*%< Number of results */ + + /* + * DNS wire format rcodes. +diff --git a/lib/dns/include/dns/zone.h b/lib/dns/include/dns/zone.h +index a9367f1..227540b 100644 +--- a/lib/dns/include/dns/zone.h ++++ b/lib/dns/include/dns/zone.h +@@ -296,6 +296,32 @@ dns_zone_getfile(dns_zone_t *zone); + */ + + void ++dns_zone_setmaxrecords(dns_zone_t *zone, isc_uint32_t records); ++/*%< ++ * Sets the maximim number of records permitted in a zone. ++ * 0 implies unlimited. ++ * ++ * Requires: ++ *\li 'zone' to be valid initialised zone. ++ * ++ * Returns: ++ *\li void ++ */ ++ ++isc_uint32_t ++dns_zone_getmaxrecords(dns_zone_t *zone); ++/*%< ++ * Gets the maximim number of records permitted in a zone. ++ * 0 implies unlimited. ++ * ++ * Requires: ++ *\li 'zone' to be valid initialised zone. ++ * ++ * Returns: ++ *\li isc_uint32_t maxrecords. ++ */ ++ ++void + dns_zone_setmaxttl(dns_zone_t *zone, isc_uint32_t maxttl); + /*%< + * Sets the max ttl of the zone. 
+@@ -316,7 +342,7 @@ dns_zone_getmaxttl(dns_zone_t *zone); + *\li 'zone' to be valid initialised zone. + * + * Returns: +- *\li isc_uint32_t maxttl. ++ *\li dns_ttl_t maxttl. + */ + + isc_result_t +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index 62becfc..72d722f 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -209,6 +209,7 @@ typedef isc_uint64_t rbtdb_serial_t; + #define free_rbtdb_callback free_rbtdb_callback64 + #define free_rdataset free_rdataset64 + #define getnsec3parameters getnsec3parameters64 ++#define getsize getsize64 + #define getoriginnode getoriginnode64 + #define getrrsetstats getrrsetstats64 + #define getsigningtime getsigningtime64 +@@ -589,6 +590,13 @@ typedef struct rbtdb_version { + isc_uint16_t iterations; + isc_uint8_t salt_length; + unsigned char salt[DNS_NSEC3_SALTSIZE]; ++ ++ /* ++ * records and bytes are covered by rwlock. ++ */ ++ isc_rwlock_t rwlock; ++ isc_uint64_t records; ++ isc_uint64_t bytes; + } rbtdb_version_t; + + typedef ISC_LIST(rbtdb_version_t) rbtdb_versionlist_t; +@@ -1130,6 +1138,7 @@ free_rbtdb(dns_rbtdb_t *rbtdb, isc_boolean_t log, isc_event_t *event) { + INSIST(refs == 0); + UNLINK(rbtdb->open_versions, rbtdb->current_version, link); + isc_refcount_destroy(&rbtdb->current_version->references); ++ isc_rwlock_destroy(&rbtdb->current_version->rwlock); + isc_mem_put(rbtdb->common.mctx, rbtdb->current_version, + sizeof(rbtdb_version_t)); + } +@@ -1383,6 +1392,7 @@ allocate_version(isc_mem_t *mctx, rbtdb_serial_t serial, + + static isc_result_t + newversion(dns_db_t *db, dns_dbversion_t **versionp) { ++ isc_result_t result; + dns_rbtdb_t *rbtdb = (dns_rbtdb_t *)db; + rbtdb_version_t *version; + +@@ -1415,13 +1425,28 @@ newversion(dns_db_t *db, dns_dbversion_t **versionp) { + version->salt_length = 0; + memset(version->salt, 0, sizeof(version->salt)); + } +- rbtdb->next_serial++; +- rbtdb->future_version = version; +- } ++ result = isc_rwlock_init(&version->rwlock, 0, 0); ++ if (result != ISC_R_SUCCESS) { ++ isc_refcount_destroy(&version->references); ++ isc_mem_put(rbtdb->common.mctx, version, ++ sizeof(*version)); ++ version = NULL; ++ } else { ++ RWLOCK(&rbtdb->current_version->rwlock, ++ isc_rwlocktype_read); ++ version->records = rbtdb->current_version->records; ++ version->bytes = rbtdb->current_version->bytes; ++ RWUNLOCK(&rbtdb->current_version->rwlock, ++ isc_rwlocktype_read); ++ rbtdb->next_serial++; ++ rbtdb->future_version = version; ++ } ++ } else ++ result = ISC_R_NOMEMORY; + RBTDB_UNLOCK(&rbtdb->lock, isc_rwlocktype_write); + + if (version == NULL) +- return (ISC_R_NOMEMORY); ++ return (result); + + *versionp = version; + +@@ -2681,6 +2706,7 @@ closeversion(dns_db_t *db, dns_dbversion_t **versionp, isc_boolean_t commit) { + + if (cleanup_version != NULL) { + INSIST(EMPTY(cleanup_version->changed_list)); ++ isc_rwlock_destroy(&cleanup_version->rwlock); + isc_mem_put(rbtdb->common.mctx, cleanup_version, + sizeof(*cleanup_version)); + } +@@ -6254,6 +6280,26 @@ add32(dns_rbtdb_t *rbtdb, dns_rbtnode_t *rbtnode, rbtdb_version_t *rbtversion, + else + rbtnode->data = newheader; + newheader->next = topheader->next; ++ if (rbtversion != NULL) ++ RWLOCK(&rbtversion->rwlock, isc_rwlocktype_write); ++ if (rbtversion != NULL && !header_nx) { ++ rbtversion->records -= ++ dns_rdataslab_count((unsigned char *)header, ++ sizeof(*header)); ++ rbtversion->bytes -= ++ dns_rdataslab_size((unsigned char *)header, ++ sizeof(*header)); ++ } ++ if (rbtversion != NULL && !newheader_nx) { ++ rbtversion->records += ++ dns_rdataslab_count((unsigned 
char *)newheader, ++ sizeof(*newheader)); ++ rbtversion->bytes += ++ dns_rdataslab_size((unsigned char *)newheader, ++ sizeof(*newheader)); ++ } ++ if (rbtversion != NULL) ++ RWUNLOCK(&rbtversion->rwlock, isc_rwlocktype_write); + if (loading) { + /* + * There are no other references to 'header' when +@@ -6355,6 +6401,16 @@ add32(dns_rbtdb_t *rbtdb, dns_rbtnode_t *rbtnode, rbtdb_version_t *rbtversion, + newheader->down = NULL; + rbtnode->data = newheader; + } ++ if (rbtversion != NULL && !newheader_nx) { ++ RWLOCK(&rbtversion->rwlock, isc_rwlocktype_write); ++ rbtversion->records += ++ dns_rdataslab_count((unsigned char *)newheader, ++ sizeof(*newheader)); ++ rbtversion->bytes += ++ dns_rdataslab_size((unsigned char *)newheader, ++ sizeof(*newheader)); ++ RWUNLOCK(&rbtversion->rwlock, isc_rwlocktype_write); ++ } + idx = newheader->node->locknum; + if (IS_CACHE(rbtdb)) { + ISC_LIST_PREPEND(rbtdb->rdatasets[idx], +@@ -6811,6 +6867,12 @@ subtractrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + */ + newheader->additional_auth = NULL; + newheader->additional_glue = NULL; ++ rbtversion->records += ++ dns_rdataslab_count((unsigned char *)newheader, ++ sizeof(*newheader)); ++ rbtversion->bytes += ++ dns_rdataslab_size((unsigned char *)newheader, ++ sizeof(*newheader)); + } else if (result == DNS_R_NXRRSET) { + /* + * This subtraction would remove all of the rdata; +@@ -6846,6 +6908,12 @@ subtractrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + * topheader. + */ + INSIST(rbtversion->serial >= topheader->serial); ++ rbtversion->records -= ++ dns_rdataslab_count((unsigned char *)header, ++ sizeof(*header)); ++ rbtversion->bytes -= ++ dns_rdataslab_size((unsigned char *)header, ++ sizeof(*header)); + if (topheader_prev != NULL) + topheader_prev->next = newheader; + else +@@ -7172,6 +7240,7 @@ rbt_datafixer(dns_rbtnode_t *rbtnode, void *base, size_t filesize, + unsigned char *limit = ((unsigned char *) base) + filesize; + unsigned char *p; + size_t size; ++ unsigned int count; + + REQUIRE(rbtnode != NULL); + +@@ -7179,6 +7248,9 @@ rbt_datafixer(dns_rbtnode_t *rbtnode, void *base, size_t filesize, + p = (unsigned char *) header; + + size = dns_rdataslab_size(p, sizeof(*header)); ++ count = dns_rdataslab_count(p, sizeof(*header));; ++ rbtdb->current_version->records += count; ++ rbtdb->current_version->bytes += size; + isc_crc64_update(crc, p, size); + #ifdef DEBUG + hexdump("hashing header", p, sizeof(rdatasetheader_t)); +@@ -7777,6 +7849,33 @@ getnsec3parameters(dns_db_t *db, dns_dbversion_t *version, dns_hash_t *hash, + } + + static isc_result_t ++getsize(dns_db_t *db, dns_dbversion_t *version, isc_uint64_t *records, ++ isc_uint64_t *bytes) ++{ ++ dns_rbtdb_t *rbtdb; ++ isc_result_t result = ISC_R_SUCCESS; ++ rbtdb_version_t *rbtversion = version; ++ ++ rbtdb = (dns_rbtdb_t *)db; ++ ++ REQUIRE(VALID_RBTDB(rbtdb)); ++ INSIST(rbtversion == NULL || rbtversion->rbtdb == rbtdb); ++ ++ if (rbtversion == NULL) ++ rbtversion = rbtdb->current_version; ++ ++ RWLOCK(&rbtversion->rwlock, isc_rwlocktype_read); ++ if (records != NULL) ++ *records = rbtversion->records; ++ ++ if (bytes != NULL) ++ *bytes = rbtversion->bytes; ++ RWUNLOCK(&rbtversion->rwlock, isc_rwlocktype_read); ++ ++ return (result); ++} ++ ++static isc_result_t + setsigningtime(dns_db_t *db, dns_rdataset_t *rdataset, isc_stdtime_t resign) { + dns_rbtdb_t *rbtdb = (dns_rbtdb_t *)db; + isc_stdtime_t oldresign; +@@ -7972,7 +8071,8 @@ static dns_dbmethods_t zone_methods = { + NULL, + NULL, + NULL, +- hashsize 
++ hashsize, ++ getsize + }; + + static dns_dbmethods_t cache_methods = { +@@ -8018,7 +8118,8 @@ static dns_dbmethods_t cache_methods = { + NULL, + NULL, + setcachestats, +- hashsize ++ hashsize, ++ NULL + }; + + isc_result_t +@@ -8310,6 +8411,20 @@ dns_rbtdb_create + rbtdb->current_version->salt_length = 0; + memset(rbtdb->current_version->salt, 0, + sizeof(rbtdb->current_version->salt)); ++ result = isc_rwlock_init(&rbtdb->current_version->rwlock, 0, 0); ++ if (result != ISC_R_SUCCESS) { ++ isc_refcount_destroy(&rbtdb->current_version->references); ++ isc_mem_put(mctx, rbtdb->current_version, ++ sizeof(*rbtdb->current_version)); ++ rbtdb->current_version = NULL; ++ isc_refcount_decrement(&rbtdb->references, NULL); ++ isc_refcount_destroy(&rbtdb->references); ++ free_rbtdb(rbtdb, ISC_FALSE, NULL); ++ return (result); ++ } ++ ++ rbtdb->current_version->records = 0; ++ rbtdb->current_version->bytes = 0; + rbtdb->future_version = NULL; + ISC_LIST_INIT(rbtdb->open_versions); + /* +diff --git a/lib/dns/rdataslab.c b/lib/dns/rdataslab.c +index e29dc84..63e3728 100644 +--- a/lib/dns/rdataslab.c ++++ b/lib/dns/rdataslab.c +@@ -523,6 +523,19 @@ dns_rdataslab_size(unsigned char *slab, unsigned int reservelen) { + return ((unsigned int)(current - slab)); + } + ++unsigned int ++dns_rdataslab_count(unsigned char *slab, unsigned int reservelen) { ++ unsigned int count; ++ unsigned char *current; ++ ++ REQUIRE(slab != NULL); ++ ++ current = slab + reservelen; ++ count = *current++ * 256; ++ count += *current++; ++ return (count); ++} ++ + /* + * Make the dns_rdata_t 'rdata' refer to the slab item + * beginning at '*current', which is part of a slab of type +diff --git a/lib/dns/result.c b/lib/dns/result.c +index 7be4f57..a621909 100644 +--- a/lib/dns/result.c ++++ b/lib/dns/result.c +@@ -167,11 +167,16 @@ static const char *text[DNS_R_NRESULTS] = { + "covered by negative trust anchor", /*%< 110 DNS_R_NTACOVERED */ + "bad CDS", /*%< 111 DNS_R_BADCSD */ + "bad CDNSKEY", /*%< 112 DNS_R_BADCDNSKEY */ +- "malformed OPT option" /*%< 113 DNS_R_OPTERR */ ++ "malformed OPT option", /*%< 113 DNS_R_OPTERR */ ++ "malformed DNSTAP data", /*%< 114 DNS_R_BADDNSTAP */ ++ ++ "TSIG in wrong location", /*%< 115 DNS_R_BADTSIG */ ++ "SIG(0) in wrong location", /*%< 116 DNS_R_BADSIG0 */ ++ "too many records", /*%< 117 DNS_R_TOOMANYRECORDS */ + }; + + static const char *rcode_text[DNS_R_NRCODERESULTS] = { +- "NOERROR", /*%< 0 DNS_R_NOEROR */ ++ "NOERROR", /*%< 0 DNS_R_NOERROR */ + "FORMERR", /*%< 1 DNS_R_FORMERR */ + "SERVFAIL", /*%< 2 DNS_R_SERVFAIL */ + "NXDOMAIN", /*%< 3 DNS_R_NXDOMAIN */ +diff --git a/lib/dns/sdb.c b/lib/dns/sdb.c +index abfeeb0..19397e0 100644 +--- a/lib/dns/sdb.c ++++ b/lib/dns/sdb.c +@@ -1298,7 +1298,8 @@ static dns_dbmethods_t sdb_methods = { + findnodeext, + findext, + NULL, /* setcachestats */ +- NULL /* hashsize */ ++ NULL, /* hashsize */ ++ NULL /* getsize */ + }; + + static isc_result_t +diff --git a/lib/dns/sdlz.c b/lib/dns/sdlz.c +index b1198a4..0e3163d 100644 +--- a/lib/dns/sdlz.c ++++ b/lib/dns/sdlz.c +@@ -1269,7 +1269,8 @@ static dns_dbmethods_t sdlzdb_methods = { + findnodeext, + findext, + NULL, /* setcachestats */ +- NULL /* hashsize */ ++ NULL, /* hashsize */ ++ NULL /* getsize */ + }; + + /* +diff --git a/lib/dns/xfrin.c b/lib/dns/xfrin.c +index 2a6c1b4..ac566e1 100644 +--- a/lib/dns/xfrin.c ++++ b/lib/dns/xfrin.c +@@ -149,6 +149,9 @@ struct dns_xfrin_ctx { + unsigned int nrecs; /*%< Number of records recvd */ + isc_uint64_t nbytes; /*%< Number of bytes received */ + ++ unsigned int 
maxrecords; /*%< The maximum number of ++ records set for the zone */ ++ + isc_time_t start; /*%< Start time of the transfer */ + isc_time_t end; /*%< End time of the transfer */ + +@@ -309,10 +312,18 @@ axfr_putdata(dns_xfrin_ctx_t *xfr, dns_diffop_t op, + static isc_result_t + axfr_apply(dns_xfrin_ctx_t *xfr) { + isc_result_t result; ++ isc_uint64_t records; + + CHECK(dns_diff_load(&xfr->diff, xfr->axfr.add, xfr->axfr.add_private)); + xfr->difflen = 0; + dns_diff_clear(&xfr->diff); ++ if (xfr->maxrecords != 0U) { ++ result = dns_db_getsize(xfr->db, xfr->ver, &records, NULL); ++ if (result == ISC_R_SUCCESS && records > xfr->maxrecords) { ++ result = DNS_R_TOOMANYRECORDS; ++ goto failure; ++ } ++ } + result = ISC_R_SUCCESS; + failure: + return (result); +@@ -396,6 +407,7 @@ ixfr_putdata(dns_xfrin_ctx_t *xfr, dns_diffop_t op, + static isc_result_t + ixfr_apply(dns_xfrin_ctx_t *xfr) { + isc_result_t result; ++ isc_uint64_t records; + + if (xfr->ver == NULL) { + CHECK(dns_db_newversion(xfr->db, &xfr->ver)); +@@ -403,6 +415,13 @@ ixfr_apply(dns_xfrin_ctx_t *xfr) { + CHECK(dns_journal_begin_transaction(xfr->ixfr.journal)); + } + CHECK(dns_diff_apply(&xfr->diff, xfr->db, xfr->ver)); ++ if (xfr->maxrecords != 0U) { ++ result = dns_db_getsize(xfr->db, xfr->ver, &records, NULL); ++ if (result == ISC_R_SUCCESS && records > xfr->maxrecords) { ++ result = DNS_R_TOOMANYRECORDS; ++ goto failure; ++ } ++ } + if (xfr->ixfr.journal != NULL) { + result = dns_journal_writediff(xfr->ixfr.journal, &xfr->diff); + if (result != ISC_R_SUCCESS) +@@ -759,7 +778,7 @@ xfrin_reset(dns_xfrin_ctx_t *xfr) { + + static void + xfrin_fail(dns_xfrin_ctx_t *xfr, isc_result_t result, const char *msg) { +- if (result != DNS_R_UPTODATE) { ++ if (result != DNS_R_UPTODATE && result != DNS_R_TOOMANYRECORDS) { + xfrin_log(xfr, ISC_LOG_ERROR, "%s: %s", + msg, isc_result_totext(result)); + if (xfr->is_ixfr) +@@ -852,6 +871,7 @@ xfrin_create(isc_mem_t *mctx, + xfr->nmsg = 0; + xfr->nrecs = 0; + xfr->nbytes = 0; ++ xfr->maxrecords = dns_zone_getmaxrecords(zone); + isc_time_now(&xfr->start); + + xfr->tsigkey = NULL; +diff --git a/lib/dns/zone.c b/lib/dns/zone.c +index 90e558d..2b0d8e4 100644 +--- a/lib/dns/zone.c ++++ b/lib/dns/zone.c +@@ -253,6 +253,8 @@ struct dns_zone { + isc_uint32_t maxretry; + isc_uint32_t minretry; + ++ isc_uint32_t maxrecords; ++ + isc_sockaddr_t *masters; + isc_dscp_t *masterdscps; + dns_name_t **masterkeynames; +@@ -10088,6 +10090,20 @@ dns_zone_setmaxretrytime(dns_zone_t *zone, isc_uint32_t val) { + zone->maxretry = val; + } + ++isc_uint32_t ++dns_zone_getmaxrecords(dns_zone_t *zone) { ++ REQUIRE(DNS_ZONE_VALID(zone)); ++ ++ return (zone->maxrecords); ++} ++ ++void ++dns_zone_setmaxrecords(dns_zone_t *zone, isc_uint32_t val) { ++ REQUIRE(DNS_ZONE_VALID(zone)); ++ ++ zone->maxrecords = val; ++} ++ + static isc_boolean_t + notify_isqueued(dns_zone_t *zone, unsigned int flags, dns_name_t *name, + isc_sockaddr_t *addr, dns_tsigkey_t *key) +@@ -14431,7 +14447,7 @@ zone_xfrdone(dns_zone_t *zone, isc_result_t result) { + DNS_ZONE_CLRFLAG(zone, DNS_ZONEFLG_SOABEFOREAXFR); + + TIME_NOW(&now); +- switch (result) { ++ switch (xfrresult) { + case ISC_R_SUCCESS: + DNS_ZONE_SETFLAG(zone, DNS_ZONEFLG_NEEDNOTIFY); + /*FALLTHROUGH*/ +@@ -14558,6 +14574,11 @@ zone_xfrdone(dns_zone_t *zone, isc_result_t result) { + DNS_ZONE_SETFLAG(zone, DNS_ZONEFLAG_NOIXFR); + goto same_master; + ++ case DNS_R_TOOMANYRECORDS: ++ DNS_ZONE_JITTER_ADD(&now, zone->refresh, &zone->refreshtime); ++ inc_stats(zone, dns_zonestatscounter_xfrfail); ++ 
break; ++ + default: + next_master: + /* +diff --git a/lib/isccfg/namedconf.c b/lib/isccfg/namedconf.c +index 780ab46..e7ff1cc 100644 +--- a/lib/isccfg/namedconf.c ++++ b/lib/isccfg/namedconf.c +@@ -1679,6 +1679,7 @@ zone_clauses[] = { + { "masterfile-format", &cfg_type_masterformat, 0 }, + { "max-ixfr-log-size", &cfg_type_size, CFG_CLAUSEFLAG_OBSOLETE }, + { "max-journal-size", &cfg_type_sizenodefault, 0 }, ++ { "max-records", &cfg_type_uint32, 0 }, + { "max-refresh-time", &cfg_type_uint32, 0 }, + { "max-retry-time", &cfg_type_uint32, 0 }, + { "max-transfer-idle-in", &cfg_type_uint32, 0 }, +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2016-8864.patch b/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2016-8864.patch new file mode 100644 index 000000000..b52d6800f --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind/CVE-2016-8864.patch @@ -0,0 +1,219 @@ +From c1d0599a246f646d1c22018f8fa09459270a44b8 Mon Sep 17 00:00:00 2001 +From: Mark Andrews <marka@isc.org> +Date: Fri, 21 Oct 2016 14:55:10 +1100 +Subject: [PATCH] 4489. [security] It was possible to trigger assertions when + processing a response. (CVE-2016-8864) [RT #43465] + +(cherry picked from commit bd6f27f5c353133b563fe69100b2f168c129f3ca) + +Upstream-Status: Backport +[https://source.isc.org/cgi-bin/gitweb.cgi?p=bind9.git;a=commit;h=c1d0599a246f646d1c22018f8fa09459270a44b8] + +CVE: CVE-2016-8864 + +Signed-off-by: Yi Zhao <yi.zhao@windriver.com> +--- + CHANGES | 3 +++ + lib/dns/resolver.c | 69 +++++++++++++++++++++++++++++++++++++----------------- + 2 files changed, 50 insertions(+), 22 deletions(-) + +diff --git a/CHANGES b/CHANGES +index 5c8c61a..41cfce5 100644 +--- a/CHANGES ++++ b/CHANGES +@@ -1,3 +1,6 @@ ++4489. [security] It was possible to trigger assertions when processing ++ a response. (CVE-2016-8864) [RT #43465] ++ + 4467. [security] It was possible to trigger an assertion when + rendering a message. (CVE-2016-2776) [RT #43139] + +diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c +index ba1ae23..13c8b44 100644 +--- a/lib/dns/resolver.c ++++ b/lib/dns/resolver.c +@@ -612,7 +612,9 @@ valcreate(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, dns_name_t *name, + valarg->addrinfo = addrinfo; + + if (!ISC_LIST_EMPTY(fctx->validators)) +- INSIST((valoptions & DNS_VALIDATOR_DEFER) != 0); ++ valoptions |= DNS_VALIDATOR_DEFER; ++ else ++ valoptions &= ~DNS_VALIDATOR_DEFER; + + result = dns_validator_create(fctx->res->view, name, type, rdataset, + sigrdataset, fctx->rmessage, +@@ -5526,13 +5528,6 @@ cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + rdataset, + sigrdataset, + valoptions, task); +- /* +- * Defer any further validations. +- * This prevents multiple validators +- * from manipulating fctx->rmessage +- * simultaneously. 
+- */ +- valoptions |= DNS_VALIDATOR_DEFER; + } + } else if (CHAINING(rdataset)) { + if (rdataset->type == dns_rdatatype_cname) +@@ -5647,6 +5642,11 @@ cache_name(fetchctx_t *fctx, dns_name_t *name, dns_adbaddrinfo_t *addrinfo, + eresult == DNS_R_NCACHENXRRSET); + } + event->result = eresult; ++ if (adbp != NULL && *adbp != NULL) { ++ if (anodep != NULL && *anodep != NULL) ++ dns_db_detachnode(*adbp, anodep); ++ dns_db_detach(adbp); ++ } + dns_db_attach(fctx->cache, adbp); + dns_db_transfernode(fctx->cache, &node, anodep); + clone_results(fctx); +@@ -5897,6 +5897,11 @@ ncache_message(fetchctx_t *fctx, dns_adbaddrinfo_t *addrinfo, + fctx->attributes |= FCTX_ATTR_HAVEANSWER; + if (event != NULL) { + event->result = eresult; ++ if (adbp != NULL && *adbp != NULL) { ++ if (anodep != NULL && *anodep != NULL) ++ dns_db_detachnode(*adbp, anodep); ++ dns_db_detach(adbp); ++ } + dns_db_attach(fctx->cache, adbp); + dns_db_transfernode(fctx->cache, &node, anodep); + clone_results(fctx); +@@ -6718,13 +6723,15 @@ static isc_result_t + answer_response(fetchctx_t *fctx) { + isc_result_t result; + dns_message_t *message; +- dns_name_t *name, *dname, *qname, tname, *ns_name; ++ dns_name_t *name, *dname = NULL, *qname, *dqname, tname, *ns_name; ++ dns_name_t *cname = NULL; + dns_rdataset_t *rdataset, *ns_rdataset; + isc_boolean_t done, external, chaining, aa, found, want_chaining; +- isc_boolean_t have_answer, found_cname, found_type, wanted_chaining; ++ isc_boolean_t have_answer, found_cname, found_dname, found_type; ++ isc_boolean_t wanted_chaining; + unsigned int aflag; + dns_rdatatype_t type; +- dns_fixedname_t fdname, fqname; ++ dns_fixedname_t fdname, fqname, fqdname; + dns_view_t *view; + + FCTXTRACE("answer_response"); +@@ -6738,6 +6745,7 @@ answer_response(fetchctx_t *fctx) { + + done = ISC_FALSE; + found_cname = ISC_FALSE; ++ found_dname = ISC_FALSE; + found_type = ISC_FALSE; + chaining = ISC_FALSE; + have_answer = ISC_FALSE; +@@ -6747,12 +6755,13 @@ answer_response(fetchctx_t *fctx) { + aa = ISC_TRUE; + else + aa = ISC_FALSE; +- qname = &fctx->name; ++ dqname = qname = &fctx->name; + type = fctx->type; + view = fctx->res->view; ++ dns_fixedname_init(&fqdname); + result = dns_message_firstname(message, DNS_SECTION_ANSWER); + while (!done && result == ISC_R_SUCCESS) { +- dns_namereln_t namereln; ++ dns_namereln_t namereln, dnamereln; + int order; + unsigned int nlabels; + +@@ -6760,6 +6769,8 @@ answer_response(fetchctx_t *fctx) { + dns_message_currentname(message, DNS_SECTION_ANSWER, &name); + external = ISC_TF(!dns_name_issubdomain(name, &fctx->domain)); + namereln = dns_name_fullcompare(qname, name, &order, &nlabels); ++ dnamereln = dns_name_fullcompare(dqname, name, &order, ++ &nlabels); + if (namereln == dns_namereln_equal) { + wanted_chaining = ISC_FALSE; + for (rdataset = ISC_LIST_HEAD(name->list); +@@ -6854,7 +6865,7 @@ answer_response(fetchctx_t *fctx) { + } + } else if (rdataset->type == dns_rdatatype_rrsig + && rdataset->covers == +- dns_rdatatype_cname ++ dns_rdatatype_cname + && !found_type) { + /* + * We're looking for something else, +@@ -6884,11 +6895,18 @@ answer_response(fetchctx_t *fctx) { + * a CNAME or DNAME). 
+ */ + INSIST(!external); +- if (aflag == +- DNS_RDATASETATTR_ANSWER) { ++ if ((rdataset->type != ++ dns_rdatatype_cname) || ++ !found_dname || ++ (aflag == ++ DNS_RDATASETATTR_ANSWER)) ++ { + have_answer = ISC_TRUE; ++ if (rdataset->type == ++ dns_rdatatype_cname) ++ cname = name; + name->attributes |= +- DNS_NAMEATTR_ANSWER; ++ DNS_NAMEATTR_ANSWER; + } + rdataset->attributes |= aflag; + if (aa) +@@ -6982,11 +7000,11 @@ answer_response(fetchctx_t *fctx) { + return (DNS_R_FORMERR); + } + +- if (namereln != dns_namereln_subdomain) { ++ if (dnamereln != dns_namereln_subdomain) { + char qbuf[DNS_NAME_FORMATSIZE]; + char obuf[DNS_NAME_FORMATSIZE]; + +- dns_name_format(qname, qbuf, ++ dns_name_format(dqname, qbuf, + sizeof(qbuf)); + dns_name_format(name, obuf, + sizeof(obuf)); +@@ -7001,7 +7019,7 @@ answer_response(fetchctx_t *fctx) { + want_chaining = ISC_TRUE; + POST(want_chaining); + aflag = DNS_RDATASETATTR_ANSWER; +- result = dname_target(rdataset, qname, ++ result = dname_target(rdataset, dqname, + nlabels, &fdname); + if (result == ISC_R_NOSPACE) { + /* +@@ -7018,10 +7036,13 @@ answer_response(fetchctx_t *fctx) { + + dname = dns_fixedname_name(&fdname); + if (!is_answertarget_allowed(view, +- qname, rdataset->type, +- dname, &fctx->domain)) { ++ dqname, rdataset->type, ++ dname, &fctx->domain)) ++ { + return (DNS_R_SERVFAIL); + } ++ dqname = dns_fixedname_name(&fqdname); ++ dns_name_copy(dname, dqname, NULL); + } else { + /* + * We've found a signature that +@@ -7046,6 +7067,10 @@ answer_response(fetchctx_t *fctx) { + INSIST(!external); + if (aflag == DNS_RDATASETATTR_ANSWER) { + have_answer = ISC_TRUE; ++ found_dname = ISC_TRUE; ++ if (cname != NULL) ++ cname->attributes &= ++ ~DNS_NAMEATTR_ANSWER; + name->attributes |= + DNS_NAMEATTR_ANSWER; + } +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.3-P3.bb b/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.3-P3.bb index 4e2e856b7..816062528 100644 --- a/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.3-P3.bb +++ b/import-layers/yocto-poky/meta/recipes-connectivity/bind/bind_9.10.3-P3.bb @@ -27,6 +27,8 @@ SRC_URI = "ftp://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \ file://CVE-2016-2088.patch \ file://CVE-2016-2775.patch \ file://CVE-2016-2776.patch \ + file://CVE-2016-8864.patch \ + file://CVE-2016-6170.patch \ " SRC_URI[md5sum] = "bcf7e772b616f7259420a3edc5df350a" diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman/0003-stats-Fix-bad-file-descriptor-initialisation.patch b/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman/0003-stats-Fix-bad-file-descriptor-initialisation.patch new file mode 100644 index 000000000..c545811ee --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman/0003-stats-Fix-bad-file-descriptor-initialisation.patch @@ -0,0 +1,102 @@ +From c7f4151fb053b0d0691d8f10d7e3690265d28889 Mon Sep 17 00:00:00 2001 +From: Lukasz Nowak <lnowak@tycoint.com> +Date: Wed, 26 Oct 2016 18:13:02 +0100 +Subject: [PATCH] stats: Fix bad file descriptor initialisation + +Stats file code initialises its file descriptor field to 0. But 0 is +a valid fd value. -1 should be used instead. This causes problems +when an error happens before a stats file is open (e.g. mkdir +fails). The clean-up procedure, stats_free() calls close(fd). When fd +is 0, this first closes stdin, and then any files/sockets which +received fd=0, re-used by the OS. 
+ +Fixed several instances of bad file descriptor field handling, in case +of errors. + +The bug results with connman freezing if there is no read/write storage +directory available, and there are multiple active interfaces +(fd=0 gets re-used for sockets in that case). + +The patch was imported from the Connman git repository +(git://git.kernel.org/pub/scm/network/connman) as of commit id +c7f4151fb053b0d0691d8f10d7e3690265d28889. + +Upstream-Status: Accepted +Signed-off-by: Lukasz Nowak <lnowak@tycoint.com> +--- + src/stats.c | 15 +++++++++++++++ + src/util.c | 4 ++-- + 2 files changed, 17 insertions(+), 2 deletions(-) + +diff --git a/src/stats.c b/src/stats.c +index 26343b1..c3ca738 100644 +--- a/src/stats.c ++++ b/src/stats.c +@@ -378,6 +378,7 @@ static int stats_file_setup(struct stats_file *file) + strerror(errno), file->name); + + TFR(close(file->fd)); ++ file->fd = -1; + g_free(file->name); + file->name = NULL; + +@@ -393,6 +394,7 @@ static int stats_file_setup(struct stats_file *file) + err = stats_file_remap(file, size); + if (err < 0) { + TFR(close(file->fd)); ++ file->fd = -1; + g_free(file->name); + file->name = NULL; + +@@ -649,6 +651,13 @@ static int stats_file_history_update(struct stats_file *data_file) + bzero(history_file, sizeof(struct stats_file)); + bzero(temp_file, sizeof(struct stats_file)); + ++ /* ++ * 0 is a valid file descriptor - fd needs to be initialized ++ * to -1 to handle errors correctly ++ */ ++ history_file->fd = -1; ++ temp_file->fd = -1; ++ + err = stats_open(history_file, data_file->history_name); + if (err < 0) + return err; +@@ -682,6 +691,12 @@ int __connman_stats_service_register(struct connman_service *service) + if (!file) + return -ENOMEM; + ++ /* ++ * 0 is a valid file descriptor - fd needs to be initialized ++ * to -1 to handle errors correctly ++ */ ++ file->fd = -1; ++ + g_hash_table_insert(stats_hash, service, file); + } else { + return -EALREADY; +diff --git a/src/util.c b/src/util.c +index e6532c8..732d451 100644 +--- a/src/util.c ++++ b/src/util.c +@@ -63,7 +63,7 @@ int __connman_util_init(void) + { + int r = 0; + +- if (f > 0) ++ if (f >= 0) + return 0; + + f = open(URANDOM, O_RDONLY); +@@ -86,7 +86,7 @@ int __connman_util_init(void) + + void __connman_util_cleanup(void) + { +- if (f > 0) ++ if (f >= 0) + close(f); + + f = -1; +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman/CVE-2017-12865.patch b/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman/CVE-2017-12865.patch new file mode 100644 index 000000000..45f78f10e --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman/CVE-2017-12865.patch @@ -0,0 +1,87 @@ +From 5c281d182ecdd0a424b64f7698f32467f8f67b71 Mon Sep 17 00:00:00 2001 +From: Jukka Rissanen <jukka.rissanen@linux.intel.com> +Date: Wed, 9 Aug 2017 10:16:46 +0300 +Subject: dnsproxy: Fix crash on malformed DNS response + +If the response query string is malformed, we might access memory +pass the end of "name" variable in parse_response(). 
+ +CVE: CVE-2017-12865 +Upstream-Status: Backport [https://git.kernel.org/pub/scm/network/connman/connman.git/patch/?id=5c281d182ecdd0a424b64f7698f32467f8f67b71] + +Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com> +--- + src/dnsproxy.c | 16 ++++++++++------ + 1 file changed, 10 insertions(+), 6 deletions(-) + +diff --git a/src/dnsproxy.c b/src/dnsproxy.c +index 38ac5bf..40b4f15 100644 +--- a/src/dnsproxy.c ++++ b/src/dnsproxy.c +@@ -838,7 +838,7 @@ static struct cache_entry *cache_check(gpointer request, int *qtype, int proto) + static int get_name(int counter, + unsigned char *pkt, unsigned char *start, unsigned char *max, + unsigned char *output, int output_max, int *output_len, +- unsigned char **end, char *name, int *name_len) ++ unsigned char **end, char *name, size_t max_name, int *name_len) + { + unsigned char *p; + +@@ -859,7 +859,7 @@ static int get_name(int counter, + + return get_name(counter + 1, pkt, pkt + offset, max, + output, output_max, output_len, end, +- name, name_len); ++ name, max_name, name_len); + } else { + unsigned label_len = *p; + +@@ -869,6 +869,9 @@ static int get_name(int counter, + if (*output_len > output_max) + return -ENOBUFS; + ++ if ((*name_len + 1 + label_len + 1) > max_name) ++ return -ENOBUFS; ++ + /* + * We need the original name in order to check + * if this answer is the correct one. +@@ -900,14 +903,14 @@ static int parse_rr(unsigned char *buf, unsigned char *start, + unsigned char *response, unsigned int *response_size, + uint16_t *type, uint16_t *class, int *ttl, int *rdlen, + unsigned char **end, +- char *name) ++ char *name, size_t max_name) + { + struct domain_rr *rr; + int err, offset; + int name_len = 0, output_len = 0, max_rsp = *response_size; + + err = get_name(0, buf, start, max, response, max_rsp, +- &output_len, end, name, &name_len); ++ &output_len, end, name, max_name, &name_len); + if (err < 0) + return err; + +@@ -1033,7 +1036,8 @@ static int parse_response(unsigned char *buf, int buflen, + memset(rsp, 0, sizeof(rsp)); + + ret = parse_rr(buf, ptr, buf + buflen, rsp, &rsp_len, +- type, class, ttl, &rdlen, &next, name); ++ type, class, ttl, &rdlen, &next, name, ++ sizeof(name) - 1); + if (ret != 0) { + err = ret; + goto out; +@@ -1099,7 +1103,7 @@ static int parse_response(unsigned char *buf, int buflen, + */ + ret = get_name(0, buf, next - rdlen, buf + buflen, + rsp, rsp_len, &output_len, &end, +- name, &name_len); ++ name, sizeof(name) - 1, &name_len); + if (ret != 0) { + /* just ignore the error at this point */ + ptr = next; +-- +cgit v1.1 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman_1.33.bb b/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman_1.33.bb index 6ea1a08dc..d8793ac8b 100644 --- a/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman_1.33.bb +++ b/import-layers/yocto-poky/meta/recipes-connectivity/connman/connman_1.33.bb @@ -5,6 +5,8 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/network/${BPN}/${BP}.tar.xz \ file://connman \ file://no-version-scripts.patch \ file://includes.patch \ + file://0003-stats-Fix-bad-file-descriptor-initialisation.patch \ + file://CVE-2017-12865.patch \ " SRC_URI_append_libc-musl = " file://0002-resolve-musl-does-not-implement-res_ninit.patch" diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/nfs-utils/nfs-utils/fix-protocol-minor-version-fall-back.patch b/import-layers/yocto-poky/meta/recipes-connectivity/nfs-utils/nfs-utils/fix-protocol-minor-version-fall-back.patch new file mode 100644 index 000000000..683246c4a 
--- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/nfs-utils/nfs-utils/fix-protocol-minor-version-fall-back.patch @@ -0,0 +1,55 @@ +From 78bb645a42c216b37b8d930c7c849a3fa89babf8 Mon Sep 17 00:00:00 2001 +From: Takashi Iwai <tiwai@suse.com> +Date: Sat, 16 Jan 2016 12:02:30 -0500 +Subject: [PATCH] Fix protocol minor version fall-back + +mount.nfs currently expects mount(2) to fail with EPROTONOSUPPORT if +the kernel doesn't understand the requested NFS version. + +Unfortunately if the requested minor is not known to the kernel +it returns -EINVAL. +In kernels since 3.11 this can happen in nfs4_alloc_client(), if +compiled without NFS_V4_2. + +More generally it can happen in in nfs_validate_text_mount_data() +when nfs_parse_mount_options() returns 0 because +nfs_parse_version_string() +didn't recognise the version. + +EPROTONOSUPPORT is only returned if NFSv4 support is completely compiled +out. + +So nfs_autonegotiate needs to check for EINVAL as well as +EPROTONOSUPPORT. + +URL: https://bugzilla.opensuse.org/show_bug.cgi?id=959211 +Reported-by: Takashi Iwai <tiwai@suse.com> +Signed-off-by: NeilBrown <neilb@suse.com> +Signed-off-by: Steve Dickson <steved@redhat.com> + + +Upstream-Status: Backport +http://git.linux-nfs.org/?p=steved/nfs-utils.git;a=patch;h=78bb645a42c216b37b8d930c7c849a3fa89babf8 + +Signed-off-by: Yi Zhao <yi.zhao@windriver.com> +--- + utils/mount/stropts.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/utils/mount/stropts.c b/utils/mount/stropts.c +index c8f5a6d..86829a9 100644 +--- a/utils/mount/stropts.c ++++ b/utils/mount/stropts.c +@@ -841,6 +841,9 @@ check_result: + case EPROTONOSUPPORT: + /* A clear indication that the server or our + * client does not support NFS version 4 and minor */ ++ case EINVAL: ++ /* A less clear indication that our client ++ * does not support NFSv4 minor version. */ + if (mi->version.v_mode == V_GENERAL && + mi->version.minor == 0) + return result; +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/nfs-utils/nfs-utils_1.3.3.bb b/import-layers/yocto-poky/meta/recipes-connectivity/nfs-utils/nfs-utils_1.3.3.bb index 8540503df..a2bebe0ba 100644 --- a/import-layers/yocto-poky/meta/recipes-connectivity/nfs-utils/nfs-utils_1.3.3.bb +++ b/import-layers/yocto-poky/meta/recipes-connectivity/nfs-utils/nfs-utils_1.3.3.bb @@ -33,6 +33,7 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/nfs-utils/${PV}/nfs-utils-${PV}.tar.x file://nfs-utils-debianize-start-statd.patch \ file://0001-nfs-utils-statd-fix-a-segfault-caused-by-improper-us.patch \ file://bugfix-adjust-statd-service-name.patch \ + file://fix-protocol-minor-version-fall-back.patch \ " SRC_URI[md5sum] = "cd6b568c2e9301cc3bfac09d87fbbc0b" diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssh/openssh/fix-CVE-2016-8858.patch b/import-layers/yocto-poky/meta/recipes-connectivity/openssh/openssh/fix-CVE-2016-8858.patch new file mode 100644 index 000000000..b26ee81b9 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssh/openssh/fix-CVE-2016-8858.patch @@ -0,0 +1,39 @@ +Fix CVE-2016-8858 of openssh + +Backport patch from upstream and drop the change of comment which can NOT be applied. 
+ +Upstream-Status: Backport [ https://anongit.mindrot.org/openssh.git/commit/?id=ec165c3 ] +CVE: CVE-2016-8858 + +Signed-off-by: Kai Kang <kai.kang@windriver.com> +--- +From ec165c392ca54317dbe3064a8c200de6531e89ad Mon Sep 17 00:00:00 2001 +From: "markus@openbsd.org" <markus@openbsd.org> +Date: Mon, 10 Oct 2016 19:28:48 +0000 +Subject: [PATCH] upstream commit + +Unregister the KEXINIT handler after message has been +received. Otherwise an unauthenticated peer can repeat the KEXINIT and cause +allocation of up to 128MB -- until the connection is closed. Reported by +shilei-c at 360.cn + +Upstream-ID: 43649ae12a27ef94290db16d1a98294588b75c05 +--- + kex.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/kex.c b/kex.c +index 3f97f8c..6a94bc5 100644 +--- a/kex.c ++++ b/kex.c +@@ -481,6 +481,7 @@ kex_input_kexinit(int type, u_int32_t seq, void *ctxt) + if (kex == NULL) + return SSH_ERR_INVALID_ARGUMENT; + ++ ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, NULL); + ptr = sshpkt_ptr(ssh, &dlen); + if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0) + return r; +-- +2.10.1 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.3p1.bb b/import-layers/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.3p1.bb index 039b0ffdd..94eb0ed20 100644 --- a/import-layers/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.3p1.bb +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssh/openssh_7.3p1.bb @@ -25,6 +25,7 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar file://openssh-7.1p1-conditional-compile-des-in-cipher.patch \ file://openssh-7.1p1-conditional-compile-des-in-pkcs11.patch \ file://fix-potential-signed-overflow-in-pointer-arithmatic.patch \ + file://fix-CVE-2016-8858.patch \ " PAM_SRC_URI = "file://sshd" diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc index f3a2c5abd..2ef8b38be 100644 --- a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl.inc @@ -107,18 +107,24 @@ do_configure () { linux-gnu64-x86_64) target=linux-x86_64 ;; - linux-mips) - target=debian-mips + linux-gnun32-mips*el) + target=debian-mipsn32el + ;; + linux-gnun32-mips*) + target=debian-mipsn32 + ;; + linux-mips*64*el) + target=debian-mips64el ;; - linux-mipsel) + linux-mips*64*) + target=debian-mips64 + ;; + linux-mips*el) target=debian-mipsel ;; - linux-*-mips64 | linux-mips64) - target=debian-mips64 - ;; - linux-*-mips64el | linux-mips64el) - target=debian-mips64el - ;; + linux-mips*) + target=debian-mips + ;; linux-microblaze*|linux-nios2*) target=linux-generic32 ;; diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/0001-CVE-2017-3731.patch b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/0001-CVE-2017-3731.patch new file mode 100644 index 000000000..04ef52682 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/0001-CVE-2017-3731.patch @@ -0,0 +1,46 @@ +From 0cde9a9645c949fd0acf657dadc747676245cfaf Mon Sep 17 00:00:00 2001 +From: Alexandru Moise <alexandru.moise@windriver.com> +Date: Tue, 7 Feb 2017 11:13:19 +0200 +Subject: [PATCH 1/2] crypto/evp: harden RC4_MD5 cipher. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Originally a crash in 32-bit build was reported CHACHA20-POLY1305 +cipher. 
The crash is triggered by truncated packet and is result +of excessive hashing to the edge of accessible memory (or bogus +MAC value is produced if x86 MD5 assembly module is involved). Since +hash operation is read-only it is not considered to be exploitable +beyond a DoS condition. + +Thanks to Robert ĹšwiÄ™cki for report. + +CVE-2017-3731 + +Backported from upstream commit: +8e20499629b6bcf868d0072c7011e590b5c2294d + +Upstream-Status: Backport + +Reviewed-by: Rich Salz <rsalz@openssl.org> +Signed-off-by: Alexandru Moise <alexandru.moise@windriver.com> +--- + crypto/evp/e_rc4_hmac_md5.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/crypto/evp/e_rc4_hmac_md5.c b/crypto/evp/e_rc4_hmac_md5.c +index 5e92855..3293419 100644 +--- a/crypto/evp/e_rc4_hmac_md5.c ++++ b/crypto/evp/e_rc4_hmac_md5.c +@@ -269,6 +269,8 @@ static int rc4_hmac_md5_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, + len = p[arg - 2] << 8 | p[arg - 1]; + + if (!ctx->encrypt) { ++ if (len < MD5_DIGEST_LENGTH) ++ return -1; + len -= MD5_DIGEST_LENGTH; + p[arg - 2] = len >> 8; + p[arg - 1] = len; +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/0002-CVE-2017-3731.patch b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/0002-CVE-2017-3731.patch new file mode 100644 index 000000000..b56b2d5bd --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/0002-CVE-2017-3731.patch @@ -0,0 +1,53 @@ +From 6427f1accc54b515bb899370f1a662bfcb1caa52 Mon Sep 17 00:00:00 2001 +From: Alexandru Moise <alexandru.moise@windriver.com> +Date: Tue, 7 Feb 2017 11:16:13 +0200 +Subject: [PATCH 2/2] crypto/evp: harden AEAD ciphers. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Originally a crash in 32-bit build was reported CHACHA20-POLY1305 +cipher. The crash is triggered by truncated packet and is result +of excessive hashing to the edge of accessible memory. Since hash +operation is read-only it is not considered to be exploitable +beyond a DoS condition. Other ciphers were hardened. + +Thanks to Robert ĹšwiÄ™cki for report. 
+ +CVE-2017-3731 + +Backported from upstream commit: +2198b3a55de681e1f3c23edb0586afe13f438051 + +Upstream-Status: Backport + +Reviewed-by: Rich Salz <rsalz@openssl.org> +Signed-off-by: Alexandru Moise <alexandru.moise@windriver.com> +--- + crypto/evp/e_aes.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/crypto/evp/e_aes.c b/crypto/evp/e_aes.c +index 1734a82..16dcd10 100644 +--- a/crypto/evp/e_aes.c ++++ b/crypto/evp/e_aes.c +@@ -1235,10 +1235,15 @@ static int aes_gcm_ctrl(EVP_CIPHER_CTX *c, int type, int arg, void *ptr) + { + unsigned int len = c->buf[arg - 2] << 8 | c->buf[arg - 1]; + /* Correct length for explicit IV */ ++ if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN) ++ return 0; + len -= EVP_GCM_TLS_EXPLICIT_IV_LEN; + /* If decrypting correct for tag too */ +- if (!c->encrypt) ++ if (!c->encrypt) { ++ if (len < EVP_GCM_TLS_TAG_LEN) ++ return 0; + len -= EVP_GCM_TLS_TAG_LEN; ++ } + c->buf[arg - 2] = len >> 8; + c->buf[arg - 1] = len & 0xff; + } +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-7055.patch b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-7055.patch new file mode 100644 index 000000000..83a74cdac --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/CVE-2016-7055.patch @@ -0,0 +1,43 @@ +From 57c4b9f6a2f800b41ce2836986fe33640f6c3f8a Mon Sep 17 00:00:00 2001 +From: Andy Polyakov <appro@openssl.org> +Date: Sun, 6 Nov 2016 18:33:17 +0100 +Subject: [PATCH] bn/asm/x86_64-mont.pl: fix for CVE-2016-7055 (Low severity). + +Reviewed-by: Rich Salz <rsalz@openssl.org> +(cherry picked from commit 2fac86d9abeaa643677d1ffd0a139239fdf9406a) + +Upstream-Status: Backport [https://github.com/openssl/openssl/commit/57c4b9f6a2f800b41ce2836986fe33640f6c3f8a] +CVE: CVE-2016-7055 +Signed-off-by: Yi Zhao <yi.zhao@windriver.com> +--- + crypto/bn/asm/x86_64-mont.pl | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/crypto/bn/asm/x86_64-mont.pl b/crypto/bn/asm/x86_64-mont.pl +index 044fd7e..80492d8 100755 +--- a/crypto/bn/asm/x86_64-mont.pl ++++ b/crypto/bn/asm/x86_64-mont.pl +@@ -1148,18 +1148,17 @@ $code.=<<___; + mulx 2*8($aptr),%r15,%r13 # ... + adox -3*8($tptr),%r11 + adcx %r15,%r12 +- adox $zero,%r12 ++ adox -2*8($tptr),%r12 + adcx $zero,%r13 ++ adox $zero,%r13 + + mov $bptr,8(%rsp) # off-load &b[i] +- .byte 0x67 + mov $mi,%r15 + imulq 24(%rsp),$mi # "t[0]"*n0 + xor %ebp,%ebp # xor $zero,$zero # cf=0, of=0 + + mulx 3*8($aptr),%rax,%r14 + mov $mi,%rdx +- adox -2*8($tptr),%r12 + adcx %rax,%r13 + adox -1*8($tptr),%r13 + adcx $zero,%r14 +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/Use-SHA256-not-MD5-as-default-digest.patch b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/Use-SHA256-not-MD5-as-default-digest.patch new file mode 100644 index 000000000..58c9ee784 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/Use-SHA256-not-MD5-as-default-digest.patch @@ -0,0 +1,69 @@ +From d795f5f20a29adecf92c09459a3ee07ffac01a99 Mon Sep 17 00:00:00 2001 +From: Rich Salz <rsalz@akamai.com> +Date: Sat, 13 Jun 2015 17:03:39 -0400 +Subject: [PATCH] Use SHA256 not MD5 as default digest. + +Commit f8547f62c212837dbf44fb7e2755e5774a59a57b upstream. 
+ +Upstream-Status: Backport +Backport from OpenSSL 2.0 to OpenSSL 1.0.2 +Commit f8547f62c212837dbf44fb7e2755e5774a59a57b + +CVE: CVE-2004-2761 + + The MD5 Message-Digest Algorithm is not collision resistant, + which makes it easier for context-dependent attackers to + conduct spoofing attacks, as demonstrated by attacks on the + use of MD5 in the signature algorithm of an X.509 certificate. + +Reviewed-by: Viktor Dukhovni <viktor@openssl.org> +Signed-off-by: Zhang Xiao <xiao.zhang@windriver.com> +Signed-off-by: T.O. Radzy Radzykewycz <radzy@windriver.com> +--- + apps/ca.c | 2 +- + apps/dgst.c | 2 +- + apps/enc.c | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/apps/ca.c b/apps/ca.c +index 3b7336c..8f3a84b 100644 +--- a/apps/ca.c ++++ b/apps/ca.c +@@ -1612,7 +1612,7 @@ static int certify_cert(X509 **xret, char *infile, EVP_PKEY *pkey, X509 *x509, + } else + BIO_printf(bio_err, "Signature ok\n"); + +- if ((rreq = X509_to_X509_REQ(req, NULL, EVP_md5())) == NULL) ++ if ((rreq = X509_to_X509_REQ(req, NULL, NULL)) == NULL) + goto err; + + ok = do_body(xret, pkey, x509, dgst, sigopts, policy, db, serial, subj, +diff --git a/apps/dgst.c b/apps/dgst.c +index 95e5fa3..0d1529f 100644 +--- a/apps/dgst.c ++++ b/apps/dgst.c +@@ -442,7 +442,7 @@ int MAIN(int argc, char **argv) + goto end; + } + if (md == NULL) +- md = EVP_md5(); ++ md = EVP_sha256(); + if (!EVP_DigestInit_ex(mctx, md, impl)) { + BIO_printf(bio_err, "Error setting digest %s\n", pname); + ERR_print_errors(bio_err); +diff --git a/apps/enc.c b/apps/enc.c +index 7b7c70b..a7d944c 100644 +--- a/apps/enc.c ++++ b/apps/enc.c +@@ -344,7 +344,7 @@ int MAIN(int argc, char **argv) + } + + if (dgst == NULL) { +- dgst = EVP_md5(); ++ dgst = EVP_sha256(); + } + + if (bufsize != NULL) { +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/openssl-c_rehash.sh b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/openssl-c_rehash.sh index f67f41554..6620fdcb5 100644 --- a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/openssl-c_rehash.sh +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl/openssl-c_rehash.sh @@ -114,11 +114,11 @@ link_hash() LINKFILE=${HASH}.${TAG}${SUFFIX} done - echo "${1} => ${LINKFILE}" + echo "${3} => ${LINKFILE}" # assume any system with a POSIX shell will either support symlinks or # do something to handle this gracefully - ln -s ${1} ${LINKFILE} + ln -s ${3} ${LINKFILE} return 0 } @@ -142,7 +142,19 @@ hash_dir() ls -1 *.pem *.cer *.crt *.crl 2>/dev/null | while read FILE do - check_file ${FILE} + REAL_FILE=${FILE} + # if we run on build host then get to the real files in rootfs + if [ -n "${SYSROOT}" -a -h ${FILE} ] + then + FILE=$( readlink ${FILE} ) + # check the symlink is absolute (or dangling in other word) + if [ "x/" = "x$( echo ${FILE} | cut -c1 -)" ] + then + REAL_FILE=${SYSROOT}/${FILE} + fi + fi + + check_file ${REAL_FILE} local FILE_TYPE=${?} local TYPE_STR='' @@ -157,7 +169,7 @@ hash_dir() continue fi - link_hash ${FILE} ${TYPE_STR} + link_hash ${REAL_FILE} ${TYPE_STR} ${FILE} done } diff --git a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2j.bb b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2j.bb index 257e3cfc4..b6fb12634 100644 --- a/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2j.bb +++ b/import-layers/yocto-poky/meta/recipes-connectivity/openssl/openssl_1.0.2j.bb @@ -5,6 +5,7 @@ require openssl.inc DEPENDS += 
"cryptodev-linux" CFLAG += "-DHAVE_CRYPTODEV -DUSE_CRYPTODEV_DIGESTS" +CFLAG_append_class-native = " -fPIC" LIC_FILES_CHKSUM = "file://LICENSE;md5=27ffa5d74bb5a337056c14b2ef93fbf6" @@ -40,6 +41,9 @@ SRC_URI += "file://find.pl;subdir=${BP}/util/ \ file://configure-musl-target.patch \ file://parallel.patch \ file://openssl-util-perlpath.pl-cwd.patch \ + file://CVE-2016-7055.patch \ + file://0001-CVE-2017-3731.patch \ + file://0002-CVE-2017-3731.patch \ " SRC_URI[md5sum] = "96322138f0b69e61b7212bc53d5e912b" SRC_URI[sha256sum] = "e7aff292be21c259c6af26469c7a9b3ba26e9abaaffd325e3dccc9785256c431" diff --git a/import-layers/yocto-poky/meta/recipes-core/base-files/base-files/profile b/import-layers/yocto-poky/meta/recipes-core/base-files/base-files/profile index 7367fd1e2..c616616ce 100644 --- a/import-layers/yocto-poky/meta/recipes-core/base-files/base-files/profile +++ b/import-layers/yocto-poky/meta/recipes-core/base-files/base-files/profile @@ -26,7 +26,7 @@ if [ -x /usr/bin/resize ] && termpath="`tty`"; then # Make sure we are on a serial console (i.e. the device used starts with /dev/tty), # otherwise we confuse e.g. the eclipse launcher which tries do use ssh case "$termpath" in - /dev/tty*) resize >/dev/null + /dev/tty[A-z]*) resize >/dev/null esac fi diff --git a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox-1.24.1/ifupdown-pass-interface-device-name-for-ipv6-route-c.patch b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox-1.24.1/ifupdown-pass-interface-device-name-for-ipv6-route-c.patch new file mode 100644 index 000000000..5715378af --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox-1.24.1/ifupdown-pass-interface-device-name-for-ipv6-route-c.patch @@ -0,0 +1,52 @@ +From 06fcf98f6ca40dc6b823d7d6231a240a1794ef2d Mon Sep 17 00:00:00 2001 +From: Haiqing Bai <Haiqing.Bai@windriver.com> +Date: Tue, 28 Feb 2017 10:40:37 +0800 +Subject: [PATCH] ifupdown: pass interface device name for ipv6 route commands +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 028524317d8d0011ed38e86e507a06738a5b5a97 from upstream + +IPv6 routes need the device argument for link-local routes, or they +cannot be used at all. E.g. "gateway fe80::def" seems to be used in +some places, but kernel refuses to insert the route unless device +name is explicitly specified in the route addition. + +Signed-off-by: Timo Teräs <timo.teras@iki.fi> +Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com> + +Upstream-Status: Backport +Signed-off-by: Haiqing Bai <Haiqing.Bai@windriver.com> +--- + networking/ifupdown.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/networking/ifupdown.c b/networking/ifupdown.c +index 17bc4e9..a00f68d 100644 +--- a/networking/ifupdown.c ++++ b/networking/ifupdown.c +@@ -394,8 +394,8 @@ static int FAST_FUNC static_up6(struct interface_defn_t *ifd, execfn *exec) + # if ENABLE_FEATURE_IFUPDOWN_IP + result = execute("ip addr add %address%/%netmask% dev %iface%[[ label %label%]]", ifd, exec); + result += execute("ip link set[[ mtu %mtu%]][[ addr %hwaddress%]] %iface% up", ifd, exec); +- /* Was: "[[ ip ....%gateway% ]]". 
Removed extra spaces w/o checking */ +- result += execute("[[ip route add ::/0 via %gateway%]][[ metric %metric%]]", ifd, exec); ++ /* Reportedly, IPv6 needs "dev %iface%", but IPv4 does not: */ ++ result += execute("[[ip route add ::/0 via %gateway% dev %iface%]][[ metric %metric%]]", ifd, exec); + # else + result = execute("ifconfig %iface%[[ media %media%]][[ hw %hwaddress%]][[ mtu %mtu%]] up", ifd, exec); + result += execute("ifconfig %iface% add %address%/%netmask%", ifd, exec); +@@ -421,7 +421,8 @@ static int FAST_FUNC v4tunnel_up(struct interface_defn_t *ifd, execfn *exec) + "%endpoint%[[ local %local%]][[ ttl %ttl%]]", ifd, exec); + result += execute("ip link set %iface% up", ifd, exec); + result += execute("ip addr add %address%/%netmask% dev %iface%", ifd, exec); +- result += execute("[[ip route add ::/0 via %gateway%]]", ifd, exec); ++ /* Reportedly, IPv6 needs "dev %iface%", but IPv4 does not: */ ++ result += execute("[[ip route add ::/0 via %gateway% dev %iface%]]", ifd, exec); + return ((result == 4) ? 4 : 0); + } + +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/0001-flock-update-the-behaviour-of-c-parameter-to-match-u.patch b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/0001-flock-update-the-behaviour-of-c-parameter-to-match-u.patch index 8bcbd73de..78520f0d9 100644 --- a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/0001-flock-update-the-behaviour-of-c-parameter-to-match-u.patch +++ b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/0001-flock-update-the-behaviour-of-c-parameter-to-match-u.patch @@ -1,35 +1,24 @@ -From 198f18addf1d814c2fefcb492f3b9fbd221669bb Mon Sep 17 00:00:00 2001 -From: "Maxin B. John" <maxin.john@intel.com> -Date: Wed, 20 Apr 2016 18:24:45 +0300 -Subject: [PATCH] flock: update the behaviour of -c parameter to match upstream - -In upstream, -c 'PROG ARGS' means "run sh -c 'PROG ARGS'" +From e1d426fd65c00a6d01a10d85edf8a294ae8a2d2b Mon Sep 17 00:00:00 2001 +From: Denys Vlasenko <vda.linux@googlemail.com> +Date: Sun, 24 Apr 2016 18:19:49 +0200 +Subject: [PATCH] flock: fix -c; improve error handling of fork+exec function old new delta -flock_main 286 377 +91 -.rodata 155849 155890 +41 +flock_main 254 334 +80 -Upstream-Status: Submitted -[ http://lists.busybox.net/pipermail/busybox/2016-April/084142.html ] +Upstream-Status: Backport +Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com> Signed-off-by: Maxin B. John <maxin.john@intel.com> --- - util-linux/flock.c | 20 ++++++++++++++------ - 1 file changed, 14 insertions(+), 6 deletions(-) + util-linux/flock.c | 19 +++++++++++++++++-- + 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/util-linux/flock.c b/util-linux/flock.c -index 05a747f..c85a25d 100644 +index 05a747f..539a835 100644 --- a/util-linux/flock.c +++ b/util-linux/flock.c -@@ -20,6 +20,7 @@ int flock_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; - int flock_main(int argc UNUSED_PARAM, char **argv) - { - int mode, opt, fd; -+ char *cmd_args[4]; - enum { - OPT_s = (1 << 0), - OPT_x = (1 << 1), -@@ -57,7 +58,6 @@ int flock_main(int argc UNUSED_PARAM, char **argv) +@@ -57,7 +57,6 @@ int flock_main(int argc UNUSED_PARAM, char **argv) /* If it is "flock FILE -c PROG", then -c isn't caught by getopt32: * we use "+" in order to support "flock -opt FILE PROG -with-opts", * we need to remove -c by hand. 
@@ -37,35 +26,37 @@ index 05a747f..c85a25d 100644 */ if (argv[0] && argv[0][0] == '-' -@@ -65,7 +65,10 @@ int flock_main(int argc UNUSED_PARAM, char **argv) - || (ENABLE_LONG_OPTS && strcmp(argv[0] + 1, "-command") == 0) +@@ -66,6 +65,9 @@ int flock_main(int argc UNUSED_PARAM, char **argv) ) ) { -- argv++; -+ if (argc != optind + 3) -+ bb_error_msg_and_die("-c requires exactly one command argument"); -+ else -+ argv++; + argv++; ++ if (argv[1]) ++ bb_error_msg_and_die("-c takes only one argument"); ++ opt |= OPT_c; } if (OPT_s == LOCK_SH && OPT_x == LOCK_EX && OPT_n == LOCK_NB && OPT_u == LOCK_UN) { -@@ -89,9 +92,14 @@ int flock_main(int argc UNUSED_PARAM, char **argv) - return EXIT_FAILURE; +@@ -90,8 +92,21 @@ int flock_main(int argc UNUSED_PARAM, char **argv) bb_perror_nomsg_and_die(); } -- + - if (argv[0]) -- return spawn_and_wait(argv); -- -+ if (argv[0]) { -+ cmd_args[0] = getenv("SHELL"); -+ if (!cmd_args[0]) -+ cmd_args[0] = (char*)DEFAULT_SHELL; -+ cmd_args[1] = (char*)"-c"; -+ cmd_args[2] = argv[0]; -+ cmd_args[3] = NULL; -+ return spawn_and_wait(cmd_args); -+ } ++ if (argv[0]) { ++ if (!(opt & OPT_c)) { ++ int rc = spawn_and_wait(argv); ++ if (rc < 0) ++ bb_simple_perror_msg(argv[0]); ++ return rc; ++ } ++ /* -c 'PROG ARGS' means "run sh -c 'PROG ARGS'" */ ++ argv -= 2; ++ argv[0] = (char*)get_shell_name(); ++ argv[1] = (char*)"-c"; ++ /* argv[2] = "PROG ARGS"; */ ++ /* argv[3] = NULL; */ + return spawn_and_wait(argv); ++ } + return EXIT_SUCCESS; } -- diff --git a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/0001-libiproute-handle-table-ids-larger-than-255.patch b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/0001-libiproute-handle-table-ids-larger-than-255.patch new file mode 100644 index 000000000..aac5b4029 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/0001-libiproute-handle-table-ids-larger-than-255.patch @@ -0,0 +1,134 @@ +From b5a9234272e6084557224c73ab7737ed47f09848 Mon Sep 17 00:00:00 2001 +From: Lukasz Nowak <lnowak@tycoint.com> +Date: Wed, 23 Nov 2016 12:48:21 +0000 +Subject: [PATCH v2] libiproute: handle table ids larger than 255 + +Linux kernel, starting from 2.6.19 allows ip table ids to have 32-bit values. +In order to preserve compatibility, the old 8-bit field: rtm_table is still +in use when table id is lower than 256. + +Add support for the 32-bit table id (RTA_TABLE attribute) in: +- ip route print +- ip route modify +- ip rule print +- ip rule modify + +Add printing of table ids to ip route. + +Changes are compatible with the mainline iproute2 utilities. + +These changes are required for compatibility with ConnMan, which by default +uses table ids greater than 255. 
+ +Upstream-Status: Submitted [http://lists.busybox.net/pipermail/busybox/2016-December/084989.html] + +Signed-off-by: Lukasz Nowak <lnowak@tycoint.com> +--- + networking/libiproute/iproute.c | 24 ++++++++++++++++++++---- + networking/libiproute/iprule.c | 11 +++++++++-- + 2 files changed, 29 insertions(+), 6 deletions(-) + +diff --git a/networking/libiproute/iproute.c b/networking/libiproute/iproute.c +index 6ecd5f7..d5af498 100644 +--- a/networking/libiproute/iproute.c ++++ b/networking/libiproute/iproute.c +@@ -87,6 +87,7 @@ static int FAST_FUNC print_route(const struct sockaddr_nl *who UNUSED_PARAM, + inet_prefix dst; + inet_prefix src; + int host_len = -1; ++ uint32_t tid; + + if (n->nlmsg_type != RTM_NEWROUTE && n->nlmsg_type != RTM_DELROUTE) { + fprintf(stderr, "Not a route: %08x %08x %08x\n", +@@ -99,6 +100,14 @@ static int FAST_FUNC print_route(const struct sockaddr_nl *who UNUSED_PARAM, + if (len < 0) + bb_error_msg_and_die("wrong nlmsg len %d", len); + ++ memset(tb, 0, sizeof(tb)); ++ parse_rtattr(tb, RTA_MAX, RTM_RTA(r), len); ++ ++ if (tb[RTA_TABLE]) ++ tid = *(uint32_t *)RTA_DATA(tb[RTA_TABLE]); ++ else ++ tid = r->rtm_table; ++ + if (r->rtm_family == AF_INET6) + host_len = 128; + else if (r->rtm_family == AF_INET) +@@ -128,7 +137,7 @@ static int FAST_FUNC print_route(const struct sockaddr_nl *who UNUSED_PARAM, + } + } + } else { +- if (G_filter.tb > 0 && G_filter.tb != r->rtm_table) { ++ if (G_filter.tb > 0 && G_filter.tb != tid) { + return 0; + } + } +@@ -157,10 +166,8 @@ static int FAST_FUNC print_route(const struct sockaddr_nl *who UNUSED_PARAM, + return 0; + } + +- memset(tb, 0, sizeof(tb)); + memset(&src, 0, sizeof(src)); + memset(&dst, 0, sizeof(dst)); +- parse_rtattr(tb, RTA_MAX, RTM_RTA(r), len); + + if (tb[RTA_SRC]) { + src.bitlen = r->rtm_src_len; +@@ -283,6 +290,10 @@ static int FAST_FUNC print_route(const struct sockaddr_nl *who UNUSED_PARAM, + if (tb[RTA_OIF]) { + printf("dev %s ", ll_index_to_name(*(int*)RTA_DATA(tb[RTA_OIF]))); + } ++#if ENABLE_FEATURE_IP_RULE ++ if (tid && tid != RT_TABLE_MAIN && !G_filter.tb) ++ printf("table %s ", rtnl_rttable_n2a(tid)); ++#endif + + /* Todo: parse & show "proto kernel", "scope link" here */ + +@@ -434,7 +445,12 @@ IF_FEATURE_IP_RULE(ARG_table,) + NEXT_ARG(); + if (rtnl_rttable_a2n(&tid, *argv)) + invarg(*argv, "table"); +- req.r.rtm_table = tid; ++ if (tid < 256) ++ req.r.rtm_table = tid; ++ else { ++ req.r.rtm_table = RT_TABLE_UNSPEC; ++ addattr32(&req.n, sizeof(req), RTA_TABLE, tid); ++ } + #endif + } else if (arg == ARG_dev || arg == ARG_oif) { + NEXT_ARG(); +diff --git a/networking/libiproute/iprule.c b/networking/libiproute/iprule.c +index 774a3e2..3fac7c5 100644 +--- a/networking/libiproute/iprule.c ++++ b/networking/libiproute/iprule.c +@@ -119,7 +119,9 @@ static int FAST_FUNC print_rule(const struct sockaddr_nl *who UNUSED_PARAM, + printf("iif %s ", (char*)RTA_DATA(tb[RTA_IIF])); + } + +- if (r->rtm_table) ++ if (tb[RTA_TABLE]) ++ printf("lookup %s ", rtnl_rttable_n2a(*(uint32_t*)RTA_DATA(tb[RTA_TABLE]))); ++ else if (r->rtm_table) + printf("lookup %s ", rtnl_rttable_n2a(r->rtm_table)); + + if (tb[RTA_FLOW]) { +@@ -259,7 +261,12 @@ static int iprule_modify(int cmd, char **argv) + NEXT_ARG(); + if (rtnl_rttable_a2n(&tid, *argv)) + invarg(*argv, "table ID"); +- req.r.rtm_table = tid; ++ if (tid < 256) ++ req.r.rtm_table = tid; ++ else { ++ req.r.rtm_table = RT_TABLE_UNSPEC; ++ addattr32(&req.n, sizeof(req), RTA_TABLE, tid); ++ } + table_ok = 1; + } else if (key == ARG_dev || + key == ARG_iif +-- +2.7.4 + diff --git 
a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/BUG9071_buffer_overflow_arp.patch b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/BUG9071_buffer_overflow_arp.patch new file mode 100644 index 000000000..828694cbb --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/BUG9071_buffer_overflow_arp.patch @@ -0,0 +1,53 @@ +busybox1.24.1: Fix busybox - (local) cmdline stack buffer overwrite + +[No upstream tracking] -- https://bugs.busybox.net/show_bug.cgi?id=9071 + +busybox - (local) cmdline stack buffer overwrite + +Busybox provides an `arp` applet which is missing an array bounds check for +command-line parameter `IFNAME`. It is therefore vulnerable to a command-line +based local stack buffer overwrite effectively allowing local users to write +past a 16 bytes fixed stack buffer. This leads to two scenarios, one (A) where +an IOCTL for GET_HW_ADDRESS (`SIOCGIFHWADDR`) fails and results in a corrupted +`va_list` being passed to `*printf()` and one (B) where an attacker might provide +valid params for the IOCTL and trick the program to proceed and result in a +`RET eip overwrite` eventually gaining code execution. + +Upstream-Status: Backport [https://git.busybox.net/busybox/commit/networking/arp.c?id=88e2b1cb626761b1924305b761a5dfc723613c4e] +BUG: BUG9071 +Signed-off-by: Martin Balik <martin.balik@siemens.com> +Signed-off-by: Pascal Bach <pascal.bach@siemens.com> + +-- + +diff --git a/networking/arp.c b/networking/arp.c +index 0099aa5..87eb327 100644 +--- a/networking/arp.c ++++ b/networking/arp.c +@@ -176,7 +176,7 @@ static int arp_del(char **args) + if (flags == 0) + flags = 3; + +- strncpy(req.arp_dev, device, sizeof(req.arp_dev)); ++ strncpy_IFNAMSIZ(req.arp_dev, device); + + err = -1; + +@@ -217,7 +217,7 @@ static void arp_getdevhw(char *ifname, struct sockaddr *sa) + struct ifreq ifr; + const struct hwtype *xhw; + +- strcpy(ifr.ifr_name, ifname); ++ strncpy_IFNAMSIZ(ifr.ifr_name, ifname); + ioctl_or_perror_and_die(sockfd, SIOCGIFHWADDR, &ifr, + "can't get HW-Address for '%s'", ifname); + if (hw_set && (ifr.ifr_hwaddr.sa_family != hw->type)) { +@@ -330,7 +330,7 @@ static int arp_set(char **args) + /* Fill in the remainder of the request. */ + req.arp_flags = flags; + +- strncpy(req.arp_dev, device, sizeof(req.arp_dev)); ++ strncpy_IFNAMSIZ(req.arp_dev, device); + + /* Call the kernel. */ + if (option_mask32 & ARP_OPT_v) diff --git a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/CVE-2016-6301.patch b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/CVE-2016-6301.patch new file mode 100644 index 000000000..851bc20f7 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox/CVE-2016-6301.patch @@ -0,0 +1,37 @@ +busybox1.24.1: Fix CVE-2016-6301 + +[No upstream tracking] -- https://bugzilla.redhat.com/show_bug.cgi?id=1363710 + +ntpd: NTP server denial of service flaw + +The busybox NTP implementation doesn't check the NTP mode of packets +received on the server port and responds to any packet with the right +size. This includes responses from another NTP server. An attacker can +send a packet with a spoofed source address in order to create an +infinite loop of responses between two busybox NTP servers. Adding +more packets to the loop increases the traffic between the servers +until one of them has a fully loaded CPU and/or network. 
+ +Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=150dc7a2b483b8338a3e185c478b4b23ee884e71] +CVE: CVE-2016-6301 +Signed-off-by: Andrej Valek <andrej.valek@siemens.com> +Signed-off-by: Pascal Bach <pascal.bach@siemens.com> + +diff --git a/networking/ntpd.c b/networking/ntpd.c +index 9732c9b..0f6a55f 100644 +--- a/networking/ntpd.c ++++ b/networking/ntpd.c +@@ -1985,6 +1985,13 @@ recv_and_process_client_pkt(void /*int fd*/) + goto bail; + } + ++ /* Respond only to client and symmetric active packets */ ++ if ((msg.m_status & MODE_MASK) != MODE_CLIENT ++ && (msg.m_status & MODE_MASK) != MODE_SYM_ACT ++ ) { ++ goto bail; ++ } ++ + query_status = msg.m_status; + query_xmttime = msg.m_xmttime; + diff --git a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox_1.24.1.bb b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox_1.24.1.bb index df0e13126..400cdfe28 100644 --- a/import-layers/yocto-poky/meta/recipes-core/busybox/busybox_1.24.1.bb +++ b/import-layers/yocto-poky/meta/recipes-core/busybox/busybox_1.24.1.bb @@ -47,12 +47,16 @@ SRC_URI = "http://www.busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \ file://CVE-2016-2148.patch \ file://CVE-2016-2147.patch \ file://CVE-2016-2147_2.patch \ + file://CVE-2016-6301.patch \ file://ip_fix_problem_on_mips64_n64_big_endian_musl_systems.patch \ file://makefile-fix-backport.patch \ file://0001-sed-fix-sed-n-flushes-pattern-space-terminates-early.patch \ file://busybox-kbuild-race-fix-commit-d8e61bb.patch \ file://commit-applet_tables-fix-commit-0dddbc1.patch \ file://makefile-libbb-race.patch \ + file://0001-libiproute-handle-table-ids-larger-than-255.patch \ + file://ifupdown-pass-interface-device-name-for-ipv6-route-c.patch \ + file://BUG9071_buffer_overflow_arp.patch \ " SRC_URI_append_libc-musl = " file://musl.cfg " diff --git a/import-layers/yocto-poky/meta/recipes-core/busybox/files/mdev.conf b/import-layers/yocto-poky/meta/recipes-core/busybox/files/mdev.conf index 17e93da7c..4e9c23f14 100644 --- a/import-layers/yocto-poky/meta/recipes-core/busybox/files/mdev.conf +++ b/import-layers/yocto-poky/meta/recipes-core/busybox/files/mdev.conf @@ -39,4 +39,6 @@ input/mouse.* 0:0 0660 tun[0-9]* 0:0 0660 =net/ [hs]d[a-z][0-9]? 
0:0 660 */etc/mdev/mdev-mount.sh +mmcblk[0-9]rpmb 0:0 660 +mmcblk[0-9]boot[0-9] 0:0 660 mmcblk[0-9].* 0:0 660 */etc/mdev/mdev-mount.sh diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc.inc b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc.inc index e85c7044a..7bae0e955 100644 --- a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc.inc +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc.inc @@ -78,3 +78,9 @@ do_configure_prepend() { } GLIBC_ADDONS ?= "nptl,libidn" + +# Enable backtrace from abort() +do_configure_append_arm () { + echo "CFLAGS-abort.c = -fasynchronous-unwind-tables" >> ${B}/configparms + echo "CFLAGS-raise.c = -fasynchronous-unwind-tables" >> ${B}/configparms +} diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0001-Add-atomic_exchange_relaxed.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0001-Add-atomic_exchange_relaxed.patch new file mode 100644 index 000000000..a33a135f7 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0001-Add-atomic_exchange_relaxed.patch @@ -0,0 +1,58 @@ +From ce74a620bf9e1a40b7ba06d35160e20633a4d8bb Mon Sep 17 00:00:00 2001 +From: Catalin Enache <catalin.enache@windriver.com> +Date: Fri, 7 Jul 2017 13:11:16 +0300 +Subject: [PATCH 1/6] Add atomic_exchange_relaxed. + +* include/atomic.h (atomic_exchange_relaxed): New + +Upstream-Status: Backport + +Author: Torvald Riegel <triegel@redhat.com> +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + ChangeLog | 4 ++++ + include/atomic.h | 9 +++++++++ + 2 files changed, 13 insertions(+) + +diff --git a/ChangeLog b/ChangeLog +index 0fbda90..cb87279 100644 +--- a/ChangeLog ++++ b/ChangeLog +@@ -1,3 +1,7 @@ ++2016-08-05 Torvald Riegel <triegel@redhat.com> ++ ++ * include/atomic.h (atomic_exchange_relaxed): New. ++ + 2016-01-28 Carlos O'Donell <carlos@redhat.com> + Alexey Makhalov <amakhalov@vmware.com> + Florian Weimer <fweimer@redhat.com> +diff --git a/include/atomic.h b/include/atomic.h +index ad3db25..129ee24 100644 +--- a/include/atomic.h ++++ b/include/atomic.h +@@ -588,6 +588,9 @@ void __atomic_link_error (void); + __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \ + __ATOMIC_RELEASE, __ATOMIC_RELAXED); }) + ++# define atomic_exchange_relaxed(mem, desired) \ ++ ({ __atomic_check_size((mem)); \ ++ __atomic_exchange_n ((mem), (desired), __ATOMIC_RELAXED); }) + # define atomic_exchange_acquire(mem, desired) \ + ({ __atomic_check_size((mem)); \ + __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); }) +@@ -684,6 +687,12 @@ void __atomic_link_error (void); + *(expected) == __atg103_expected; }) + # endif + ++/* XXX Fall back to acquire MO because archs do not define a weaker ++ atomic_exchange. 
*/ ++# ifndef atomic_exchange_relaxed ++# define atomic_exchange_relaxed(mem, val) \ ++ atomic_exchange_acq ((mem), (val)) ++# endif + # ifndef atomic_exchange_acquire + # define atomic_exchange_acquire(mem, val) \ + atomic_exchange_acq ((mem), (val)) +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0002-Add-atomic-operations-required-by-the-new-condition-.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0002-Add-atomic-operations-required-by-the-new-condition-.patch new file mode 100644 index 000000000..c4747fa27 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0002-Add-atomic-operations-required-by-the-new-condition-.patch @@ -0,0 +1,124 @@ +From b85e30e655027132c4326d2fdde010c517165aaf Mon Sep 17 00:00:00 2001 +From: Catalin Enache <catalin.enache@windriver.com> +Date: Fri, 30 Jun 2017 14:27:34 +0300 +Subject: [PATCH 2/6] Add atomic operations required by the new condition + variable. + + * include/atomic.h (atomic_fetch_and_relaxed, + atomic_fetch_and_release, atomic_fetch_or_release, + atomic_fetch_xor_release): New. + +Upstream-Status: Backport + +Author: Torvald Riegel <triegel@redhat.com> +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + ChangeLog | 6 ++++++ + include/atomic.h | 47 +++++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 53 insertions(+) + +diff --git a/ChangeLog b/ChangeLog +index cb87279..96b6da2 100644 +--- a/ChangeLog ++++ b/ChangeLog +@@ -1,3 +1,9 @@ ++2016-08-09 Torvald Riegel <triegel@redhat.com> ++ ++ * include/atomic.h (atomic_fetch_and_relaxed, ++ atomic_fetch_and_release, atomic_fetch_or_release, ++ atomic_fetch_xor_release): New. ++ + 2016-08-05 Torvald Riegel <triegel@redhat.com> + + * include/atomic.h (atomic_exchange_relaxed): New. +diff --git a/include/atomic.h b/include/atomic.h +index 129ee24..5a8e7e7 100644 +--- a/include/atomic.h ++++ b/include/atomic.h +@@ -611,9 +611,15 @@ void __atomic_link_error (void); + ({ __atomic_check_size((mem)); \ + __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); }) + ++# define atomic_fetch_and_relaxed(mem, operand) \ ++ ({ __atomic_check_size((mem)); \ ++ __atomic_fetch_and ((mem), (operand), __ATOMIC_RELAXED); }) + # define atomic_fetch_and_acquire(mem, operand) \ + ({ __atomic_check_size((mem)); \ + __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); }) ++# define atomic_fetch_and_release(mem, operand) \ ++ ({ __atomic_check_size((mem)); \ ++ __atomic_fetch_and ((mem), (operand), __ATOMIC_RELEASE); }) + + # define atomic_fetch_or_relaxed(mem, operand) \ + ({ __atomic_check_size((mem)); \ +@@ -621,6 +627,13 @@ void __atomic_link_error (void); + # define atomic_fetch_or_acquire(mem, operand) \ + ({ __atomic_check_size((mem)); \ + __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); }) ++# define atomic_fetch_or_release(mem, operand) \ ++ ({ __atomic_check_size((mem)); \ ++ __atomic_fetch_or ((mem), (operand), __ATOMIC_RELEASE); }) ++ ++# define atomic_fetch_xor_release(mem, operand) \ ++ ({ __atomic_check_size((mem)); \ ++ __atomic_fetch_xor ((mem), (operand), __ATOMIC_RELEASE); }) + + #else /* !USE_ATOMIC_COMPILER_BUILTINS */ + +@@ -724,12 +737,24 @@ void __atomic_link_error (void); + atomic_exchange_and_add_acq ((mem), (operand)); }) + # endif + ++/* XXX Fall back to acquire MO because archs do not define a weaker ++ atomic_and_val. 
*/ ++# ifndef atomic_fetch_and_relaxed ++# define atomic_fetch_and_relaxed(mem, operand) \ ++ atomic_fetch_and_acquire ((mem), (operand)) ++# endif + /* XXX The default for atomic_and_val has acquire semantics, but this is not + documented. */ + # ifndef atomic_fetch_and_acquire + # define atomic_fetch_and_acquire(mem, operand) \ + atomic_and_val ((mem), (operand)) + # endif ++# ifndef atomic_fetch_and_release ++/* XXX This unnecessarily has acquire MO. */ ++# define atomic_fetch_and_release(mem, operand) \ ++ ({ atomic_thread_fence_release (); \ ++ atomic_and_val ((mem), (operand)); }) ++# endif + + /* XXX The default for atomic_or_val has acquire semantics, but this is not + documented. */ +@@ -743,6 +768,28 @@ void __atomic_link_error (void); + # define atomic_fetch_or_relaxed(mem, operand) \ + atomic_fetch_or_acquire ((mem), (operand)) + # endif ++/* XXX Contains an unnecessary acquire MO because archs do not define a weaker ++ atomic_or_val. */ ++# ifndef atomic_fetch_or_release ++# define atomic_fetch_or_release(mem, operand) \ ++ ({ atomic_thread_fence_release (); \ ++ atomic_fetch_or_acquire ((mem), (operand)); }) ++# endif ++ ++# ifndef atomic_fetch_xor_release ++# define atomic_fetch_xor_release(mem, operand) \ ++ ({ __typeof (*(mem)) __atg104_old; \ ++ __typeof (mem) __atg104_memp = (mem); \ ++ __typeof (*(mem)) __atg104_op = (operand); \ ++ \ ++ do \ ++ __atg104_old = (*__atg104_memp); \ ++ while (__builtin_expect \ ++ (atomic_compare_and_exchange_bool_rel ( \ ++ __atg104_memp, __atg104_old ^ __atg104_op, __atg104_old), 0));\ ++ \ ++ __atg104_old; }) ++#endif + + #endif /* !USE_ATOMIC_COMPILER_BUILTINS */ + +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0003-Add-pretty-printers-for-the-NPTL-lock-types.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0003-Add-pretty-printers-for-the-NPTL-lock-types.patch new file mode 100644 index 000000000..9eb635d71 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0003-Add-pretty-printers-for-the-NPTL-lock-types.patch @@ -0,0 +1,3197 @@ +From 246fee86fc90c57738ee282a061039f82832f4ea Mon Sep 17 00:00:00 2001 +From: Catalin Enache <catalin.enache@windriver.com> +Date: Fri, 30 Jun 2017 13:42:04 +0300 +Subject: [PATCH 3/6] Add pretty printers for the NPTL lock types + +This patch adds pretty printers for the following NPTL types: + +- pthread_mutex_t +- pthread_mutexattr_t +- pthread_cond_t +- pthread_condattr_t +- pthread_rwlock_t +- pthread_rwlockattr_t + +To load the pretty printers into your gdb session, do the following: + +python +import sys +sys.path.insert(0, '/path/to/glibc/build/nptl/pretty-printers') +end + +source /path/to/glibc/source/pretty-printers/nptl-printers.py + +You can check which printers are registered and enabled by issuing the +'info pretty-printer' gdb command. Printers should trigger automatically when +trying to print a variable of one of the types mentioned above. + +The printers are architecture-independent, and were tested on an AMD64 running +Ubuntu 14.04 and an x86 VM running Fedora 24. + +In order to work, the printers need to know the values of various flags that +are scattered throughout pthread.h and pthreadP.h as enums and #defines. Since +replicating these constants in the printers file itself would create a +maintenance burden, I wrote a script called gen-py-const.awk that Makerules uses +to extract the constants. 
This script is pretty much the same as gen-as-const.awk, +except it doesn't cast the constant values to 'long' and is thorougly documented. +The constants need only to be enumerated in a .pysym file, which is then referenced +by a Make variable called gen-py-const-headers. + +As for the install directory, I discussed this with Mike Frysinger and Siddhesh +Poyarekar, and we agreed that it can be handled in a separate patch, and shouldn't +block merging of this one. + +In addition, I've written a series of test cases for the pretty printers. +Each lock type (mutex, condvar and rwlock) has two test programs, one for itself +and other for its related 'attributes' object. Each test program in turn has a +PExpect-based Python script that drives gdb and compares its output to the +expected printer's. The tests run on the glibc host, which is assumed to have +both gdb and PExpect; if either is absent the tests will fail with code 77 +(UNSUPPORTED). For cross-testing you should use cross-test-ssh.sh as test-wrapper. +I've tested the printers on both native builds and a cross build using a Beaglebone +Black running Debian, with the build system's filesystem shared with the board +through NFS. + +Finally, I've written a README that explains all this and more. + + * INSTALL: Regenerated. + * Makeconfig: Add comments and whitespace to make the control flow + clearer. + (+link-printers-tests, +link-pie-printers-tests, CFLAGS-printers-tests, + installed-rtld-LDFLAGS, built-rtld-LDFLAGS, link-libc-rpath, + link-libc-tests-after-rpath-link, link-libc-printers-tests): New. + (rtld-LDFLAGS, rtld-tests-LDFLAGS, link-libc-tests-rpath-link, + link-libc-tests): Use the new variables as required. + * Makerules ($(py-const)): New rule. + generated: Add $(py-const). + * README.pretty-printers: New file. + * Rules (tests-printers-programs, tests-printers-out, py-env): New. + (others): Depend on $(py-const). + (tests): Depend on $(tests-printers-programs) or $(tests-printers-out), + as required. Pass $(tests-printers) to merge-test-results.sh. + * manual/install.texi: Add requirements for testing the pretty printers. + * nptl/Makefile (gen-py-const-headers, pretty-printers, tests-printers, + CFLAGS-test-mutexattr-printers.c CFLAGS-test-mutex-printers.c, + CFLAGS-test-condattr-printers.c, CFLAGS-test-cond-printers.c, + CFLAGS-test-rwlockattr-printers.c CFLAGS-test-rwlock-printers.c, + tests-printers-libs): Define. + * nptl/nptl-printers.py: New file. + * nptl/nptl_lock_constants.pysym: Likewise. + * nptl/test-cond-printers.c: Likewise. + * nptl/test-cond-printers.py: Likewise. + * nptl/test-condattr-printers.c: Likewise. + * nptl/test-condattr-printers.py: Likewise. + * nptl/test-mutex-printers.c: Likewise. + * nptl/test-mutex-printers.py: Likewise. + * nptl/test-mutexattr-printers.c: Likewise. + * nptl/test-mutexattr-printers.py: Likewise. + * nptl/test-rwlock-printers.c: Likewise. + * nptl/test-rwlock-printers.py: Likewise. + * nptl/test-rwlockattr-printers.c: Likewise. + * nptl/test-rwlockattr-printers.py: Likewise. + * scripts/gen-py-const.awk: Likewise. + * scripts/test_printers_common.py: Likewise. + * scripts/test_printers_exceptions.py: Likewise. 
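The commit message describes the PExpect-based test drivers only in prose. Stripped down to its essentials, and with hypothetical paths, program names and prompts (the real drivers are the nptl/test-*-printers.py scripts, which share their gdb plumbing through scripts/test_printers_common.py), such a driver amounts to the following sketch:

# Illustrative sketch, not one of the shipped test scripts.
# Assumes the generated constants directory is already on PYTHONPATH,
# as the py-env variable in Rules arranges during 'make check'.
import sys
import pexpect   # assumes PExpect >= 4.0, as required by the test suite

UNSUPPORTED = 77

try:
    gdb = pexpect.spawn('gdb -nx -q ./test-mutex-printers',
                        timeout=10, encoding='utf-8')
    gdb.expect_exact('(gdb) ')
except pexpect.ExceptionPexpect:
    sys.exit(UNSUPPORTED)        # no usable gdb/PExpect: report UNSUPPORTED

# Load the printers, drive the test program to an interesting point,
# then print the variable under test.
for command in ('source ./nptl-printers.py',   # hypothetical path
                'break main', 'run', 'next',
                'print mutex'):
    gdb.sendline(command)
    gdb.expect_exact('(gdb) ')

# gdb.before now holds the output of 'print mutex'; compare it against
# what the pretty printer is expected to emit.
sys.exit(0 if 'pthread_mutex_t' in gdb.before else 1)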
+ +Upstream-Status: Backport + +Author: Martin Galvan <martin.galvan@tallertechnologies.com> +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + ChangeLog | 45 +++ + INSTALL | 27 ++ + Makeconfig | 76 ++++- + Makerules | 46 +++ + NEWS | 6 + + README.pretty-printers | 169 ++++++++++ + Rules | 44 ++- + manual/install.texi | 30 ++ + nptl/Makefile | 18 + + nptl/nptl-printers.py | 633 ++++++++++++++++++++++++++++++++++++ + nptl/nptl_lock_constants.pysym | 75 +++++ + nptl/test-cond-printers.c | 57 ++++ + nptl/test-cond-printers.py | 50 +++ + nptl/test-condattr-printers.c | 94 ++++++ + nptl/test-condattr-printers.py | 71 ++++ + nptl/test-mutex-printers.c | 151 +++++++++ + nptl/test-mutex-printers.py | 97 ++++++ + nptl/test-mutexattr-printers.c | 144 ++++++++ + nptl/test-mutexattr-printers.py | 101 ++++++ + nptl/test-rwlock-printers.c | 78 +++++ + nptl/test-rwlock-printers.py | 64 ++++ + nptl/test-rwlockattr-printers.c | 98 ++++++ + nptl/test-rwlockattr-printers.py | 73 +++++ + scripts/gen-py-const.awk | 118 +++++++ + scripts/test_printers_common.py | 364 +++++++++++++++++++++ + scripts/test_printers_exceptions.py | 61 ++++ + 26 files changed, 2770 insertions(+), 20 deletions(-) + create mode 100644 README.pretty-printers + create mode 100644 nptl/nptl-printers.py + create mode 100644 nptl/nptl_lock_constants.pysym + create mode 100644 nptl/test-cond-printers.c + create mode 100644 nptl/test-cond-printers.py + create mode 100644 nptl/test-condattr-printers.c + create mode 100644 nptl/test-condattr-printers.py + create mode 100644 nptl/test-mutex-printers.c + create mode 100644 nptl/test-mutex-printers.py + create mode 100644 nptl/test-mutexattr-printers.c + create mode 100644 nptl/test-mutexattr-printers.py + create mode 100644 nptl/test-rwlock-printers.c + create mode 100644 nptl/test-rwlock-printers.py + create mode 100644 nptl/test-rwlockattr-printers.c + create mode 100644 nptl/test-rwlockattr-printers.py + create mode 100644 scripts/gen-py-const.awk + create mode 100644 scripts/test_printers_common.py + create mode 100644 scripts/test_printers_exceptions.py + +diff --git a/ChangeLog b/ChangeLog +index 96b6da2..8036c1e 100644 +--- a/ChangeLog ++++ b/ChangeLog +@@ -1,3 +1,48 @@ ++2016-12-08 Martin Galvan <martin.galvan@tallertechnologies.com> ++ ++ * INSTALL: Regenerated. ++ * Makeconfig: Add comments and whitespace to make the control flow ++ clearer. ++ (+link-printers-tests, +link-pie-printers-tests, ++ CFLAGS-printers-tests, installed-rtld-LDFLAGS, ++ built-rtld-LDFLAGS, link-libc-rpath, ++ link-libc-tests-after-rpath-link, ++ link-libc-printers-tests): New. ++ (rtld-LDFLAGS, rtld-tests-LDFLAGS, link-libc-tests-rpath-link, ++ link-libc-tests): Use the new variables as required. ++ * Makerules ($(py-const)): New rule. ++ generated: Add $(py-const). ++ * README.pretty-printers: New file. ++ * Rules (tests-printers-programs, tests-printers-out, py-env): New. ++ (others): Depend on $(py-const). ++ (tests): Depend on $(tests-printers-programs) or ++ $(tests-printers-out), ++ as required. Pass $(tests-printers) to merge-test-results.sh. ++ * manual/install.texi: Add requirements for testing the pretty ++ printers. ++ * nptl/Makefile (gen-py-const-headers, pretty-printers, ++ tests-printers, CFLAGS-test-mutexattr-printers.c ++ CFLAGS-test-mutex-printers.c, CFLAGS-test-condattr-printers.c, ++ CFLAGS-test-cond-printers.c, CFLAGS-test-rwlockattr-printers.c ++ CFLAGS-test-rwlock-printers.c, tests-printers-libs): Define. ++ * nptl/nptl-printers.py: New file. 
++ * nptl/nptl_lock_constants.pysym: Likewise. ++ * nptl/test-cond-printers.c: Likewise. ++ * nptl/test-cond-printers.py: Likewise. ++ * nptl/test-condattr-printers.c: Likewise. ++ * nptl/test-condattr-printers.py: Likewise. ++ * nptl/test-mutex-printers.c: Likewise. ++ * nptl/test-mutex-printers.py: Likewise. ++ * nptl/test-mutexattr-printers.c: Likewise. ++ * nptl/test-mutexattr-printers.py: Likewise. ++ * nptl/test-rwlock-printers.c: Likewise. ++ * nptl/test-rwlock-printers.py: Likewise. ++ * nptl/test-rwlockattr-printers.c: Likewise. ++ * nptl/test-rwlockattr-printers.py: Likewise. ++ * scripts/gen-py-const.awk: Likewise. ++ * scripts/test_printers_common.py: Likewise. ++ * scripts/test_printers_exceptions.py: Likewise. ++ + 2016-08-09 Torvald Riegel <triegel@redhat.com> + + * include/atomic.h (atomic_fetch_and_relaxed, +diff --git a/INSTALL b/INSTALL +index ec3445f..dd62c86 100644 +--- a/INSTALL ++++ b/INSTALL +@@ -224,6 +224,33 @@ You can specify 'stop-on-test-failure=y' when running 'make check' to + make the test run stop and exit with an error status immediately when a + failure occurs. + ++ The GNU C Library pretty printers come with their own set of scripts ++for testing, which run together with the rest of the testsuite through ++'make check'. These scripts require the following tools to run ++successfully: ++ ++ * Python 2.7.6/3.4.3 or later ++ ++ Python is required for running the printers' test scripts. ++ ++ * PExpect 4.0 ++ ++ The printer tests drive GDB through test programs and compare its ++ output to the printers'. PExpect is used to capture the output of ++ GDB, and should be compatible with the Python version in your ++ system. ++ ++ * GDB 7.8 or later with support for Python 2.7.6/3.4.3 or later ++ ++ GDB itself needs to be configured with Python support in order to ++ use the pretty printers. Notice that your system having Python ++ available doesn't imply that GDB supports it, nor that your ++ system's Python and GDB's have the same version. ++ ++If these tools are absent, the printer tests will report themselves as ++'UNSUPPORTED'. Notice that some of the printer tests require the GNU C ++Library to be compiled with debugging symbols. ++ + To format the 'GNU C Library Reference Manual' for printing, type + 'make dvi'. You need a working TeX installation to do this. The + distribution builds the on-line formatted version of the manual, as Info +diff --git a/Makeconfig b/Makeconfig +index 03fd89c..2d92d94 100644 +--- a/Makeconfig ++++ b/Makeconfig +@@ -416,6 +416,11 @@ $(+link-pie-before-libc) $(rtld-tests-LDFLAGS) $(link-libc-tests) \ + $(+link-pie-after-libc) + $(call after-link,$@) + endef ++define +link-pie-printers-tests ++$(+link-pie-before-libc) $(built-rtld-LDFLAGS) $(link-libc-printers-tests) \ ++ $(+link-pie-after-libc) ++$(call after-link,$@) ++endef + endif + # Command for statically linking programs with the C library. 
+ ifndef +link-static +@@ -445,7 +450,8 @@ ifeq (yes,$(build-pie-default)) + no-pie-ldflag = -no-pie + +link = $(+link-pie) + +link-tests = $(+link-pie-tests) +-else +++link-printers-tests = $(+link-pie-printers-tests) ++else # not build-pie-default + +link-before-libc = $(CC) -nostdlib -nostartfiles -o $@ \ + $(sysdep-LDFLAGS) $(LDFLAGS) $(LDFLAGS-$(@F)) \ + $(combreloc-LDFLAGS) $(relro-LDFLAGS) $(hashstyle-LDFLAGS) \ +@@ -466,51 +472,87 @@ $(+link-before-libc) $(rtld-tests-LDFLAGS) $(link-libc-tests) \ + $(+link-after-libc) + $(call after-link,$@) + endef +-endif +-else ++define +link-printers-tests ++$(+link-before-libc) $(built-rtld-LDFLAGS) $(link-libc-printers-tests) \ ++ $(+link-after-libc) ++$(call after-link,$@) ++endef ++endif # build-pie-default ++else # build-static + +link = $(+link-static) + +link-tests = $(+link-static-tests) +-endif +-endif +++link-printers-tests = $(+link-static-tests) ++endif # build-shared ++endif # +link ++ ++# The pretty printer test programs need to be compiled without optimizations ++# so they won't confuse gdb. We could use either the 'GCC optimize' pragma ++# or the 'optimize' function attribute to achieve this; however, at least on ++# ARM, gcc always produces different debugging symbols when invoked with ++# a -O greater than 0 than when invoked with -O0, regardless of anything else ++# we're using to suppress optimizations. Therefore, we need to explicitly pass ++# -O0 to it through CFLAGS. ++# Additionally, the build system will try to -include $(common-objpfx)/config.h ++# when compiling the tests, which will throw an error if some special macros ++# (such as __OPTIMIZE__ and IS_IN_build) aren't defined. To avoid this, we ++# tell gcc to define IS_IN_build. ++CFLAGS-printers-tests := -O0 -ggdb3 -DIS_IN_build ++ + ifeq (yes,$(build-shared)) ++# These indicate whether to link using the built ld.so or the installed one. ++installed-rtld-LDFLAGS = -Wl,-dynamic-linker=$(rtlddir)/$(rtld-installed-name) ++built-rtld-LDFLAGS = -Wl,-dynamic-linker=$(elf-objpfx)ld.so ++ + ifndef rtld-LDFLAGS +-rtld-LDFLAGS = -Wl,-dynamic-linker=$(rtlddir)/$(rtld-installed-name) ++rtld-LDFLAGS = $(installed-rtld-LDFLAGS) + endif ++ + ifndef rtld-tests-LDFLAGS + ifeq (yes,$(build-hardcoded-path-in-tests)) +-rtld-tests-LDFLAGS = -Wl,-dynamic-linker=$(elf-objpfx)ld.so ++rtld-tests-LDFLAGS = $(built-rtld-LDFLAGS) + else +-rtld-tests-LDFLAGS = $(rtld-LDFLAGS) +-endif +-endif +-endif ++rtld-tests-LDFLAGS = $(installed-rtld-LDFLAGS) ++endif # build-hardcoded-path-in-tests ++endif # rtld-tests-LDFLAGS ++ ++endif # build-shared ++ + ifndef link-libc + ifeq (yes,$(build-shared)) + # We need the versioned name of libc.so in the deps of $(others) et al + # so that the symlink to libc.so is created before anything tries to + # run the linked programs. 
++link-libc-rpath = -Wl,-rpath=$(rpath-link) + link-libc-rpath-link = -Wl,-rpath-link=$(rpath-link) ++ + ifeq (yes,$(build-hardcoded-path-in-tests)) +-link-libc-tests-rpath-link = -Wl,-rpath=$(rpath-link) ++link-libc-tests-rpath-link = $(link-libc-rpath) + else + link-libc-tests-rpath-link = $(link-libc-rpath-link) +-endif ++endif # build-hardcoded-path-in-tests ++ + link-libc-before-gnulib = $(common-objpfx)libc.so$(libc.so-version) \ + $(common-objpfx)$(patsubst %,$(libtype.oS),c) \ + $(as-needed) $(elf-objpfx)ld.so \ + $(no-as-needed) + link-libc = $(link-libc-rpath-link) $(link-libc-before-gnulib) $(gnulib) ++ ++link-libc-tests-after-rpath-link = $(link-libc-before-gnulib) $(gnulib-tests) + link-libc-tests = $(link-libc-tests-rpath-link) \ +- $(link-libc-before-gnulib) $(gnulib-tests) ++ $(link-libc-tests-after-rpath-link) ++# Pretty printer test programs always require rpath instead of rpath-link. ++link-libc-printers-tests = $(link-libc-rpath) \ ++ $(link-libc-tests-after-rpath-link) ++ + # This is how to find at build-time things that will be installed there. + rpath-dirs = math elf dlfcn nss nis rt resolv crypt mathvec + rpath-link = \ + $(common-objdir):$(subst $(empty) ,:,$(patsubst ../$(subdir),.,$(rpath-dirs:%=$(common-objpfx)%))) +-else ++else # build-static + link-libc = $(common-objpfx)libc.a $(otherlibs) $(gnulib) $(common-objpfx)libc.a $(gnulib) + link-libc-tests = $(common-objpfx)libc.a $(otherlibs) $(gnulib-tests) $(common-objpfx)libc.a $(gnulib-tests) +-endif +-endif ++endif # build-shared ++endif # link-libc + + # Differences in the linkers on the various platforms. + LDFLAGS-rpath-ORIGIN = -Wl,-rpath,'$$ORIGIN' +diff --git a/Makerules b/Makerules +index be3c11b..b7e0f59 100644 +--- a/Makerules ++++ b/Makerules +@@ -214,6 +214,52 @@ sed-remove-dotdot := -e 's@ *\([^ \/$$][^ \]*\)@ $$(..)\1@g' \ + -e 's@^\([^ \/$$][^ \]*\)@$$(..)\1@g' + endif + ++ifdef gen-py-const-headers ++# We'll use a static pattern rule to match .pysym files with their ++# corresponding generated .py files. ++# The generated .py files go in the submodule's dir in the glibc build dir. ++py-const-files := $(patsubst %.pysym,%.py,$(gen-py-const-headers)) ++py-const-dir := $(objpfx) ++py-const := $(addprefix $(py-const-dir),$(py-const-files)) ++py-const-script := $(..)scripts/gen-py-const.awk ++ ++# This is a hack we use to generate .py files with constants for Python ++# pretty printers. It works the same way as gen-as-const. ++# See scripts/gen-py-const.awk for details on how the awk | gcc mechanism ++# works. ++# ++# $@.tmp and $@.tmp2 are temporary files we use to store the partial contents ++# of the target file. We do this instead of just writing on $@ because, if the ++# build process terminates prematurely, re-running Make wouldn't run this rule ++# since Make would see that the target file already exists (despite it being ++# incomplete). ++# ++# The sed line replaces "@name@SOME_NAME@value@SOME_VALUE@" strings from the ++# output of 'gcc -S' with "SOME_NAME = SOME_VALUE" strings. ++# The '-n' option, combined with the '/p' command, makes sed output only the ++# modified lines instead of the whole input file. The output is redirected ++# to a .py file; we'll import it in the pretty printers file to read ++# the constants generated by gen-py-const.awk. ++# The regex has two capturing groups, for SOME_NAME and SOME_VALUE ++# respectively. Notice SOME_VALUE may be prepended by a special character, ++# depending on the assembly syntax (e.g. 
immediates are prefixed by a '$' ++# in AT&T x86, and by a '#' in ARM). We discard it using a complemented set ++# before the second capturing group. ++$(py-const): $(py-const-dir)%.py: %.pysym $(py-const-script) \ ++ $(common-before-compile) ++ $(make-target-directory) ++ $(AWK) -f $(py-const-script) $< \ ++ | $(CC) -S -o $@.tmp $(CFLAGS) $(CPPFLAGS) -x c - ++ echo '# GENERATED FILE\n' > $@.tmp2 ++ echo '# Constant definitions for pretty printers.' >> $@.tmp2 ++ echo '# See gen-py-const.awk for details.\n' >> $@.tmp2 ++ sed -n -r 's/^.*@name@([^@]+)@value@[^[:xdigit:]Xx-]*([[:xdigit:]Xx-]+)@.*/\1 = \2/p' \ ++ $@.tmp >> $@.tmp2 ++ mv -f $@.tmp2 $@ ++ rm -f $@.tmp ++ ++generated += $(py-const) ++endif # gen-py-const-headers + + ifdef gen-as-const-headers + # Generating headers for assembly constants. +diff --git a/NEWS b/NEWS +index b0447e7..3002773 100644 +--- a/NEWS ++++ b/NEWS +@@ -5,6 +5,12 @@ See the end for copying conditions. + Please send GNU C library bug reports via <http://sourceware.org/bugzilla/> + using `glibc' in the "product" field. + ++ ++* GDB pretty printers have been added for mutex and condition variable ++ structures in POSIX Threads. When installed and loaded in gdb these pretty ++ printers show various pthread variables in human-readable form when read ++ using the 'print' or 'display' commands in gdb. ++ + Version 2.24 + + * The minimum Linux kernel version that this version of the GNU C Library +diff --git a/README.pretty-printers b/README.pretty-printers +new file mode 100644 +index 0000000..8662900 +--- /dev/null ++++ b/README.pretty-printers +@@ -0,0 +1,169 @@ ++README for the glibc Python pretty printers ++=========================================== ++ ++Pretty printers are gdb extensions that allow it to print useful, human-readable ++information about a program's variables. For example, for a pthread_mutex_t ++gdb would usually output something like this: ++ ++(gdb) print mutex ++$1 = { ++ __data = { ++ __lock = 22020096, ++ __count = 0, ++ __owner = 0, ++ __nusers = 0, ++ __kind = 576, ++ __spins = 0, ++ __elision = 0, ++ __list = { ++ __prev = 0x0, ++ __next = 0x0 ++ } ++ }, ++ __size = "\000\000P\001", '\000' <repeats 12 times>, "@\002", '\000' <repeats 21 times>, ++ __align = 22020096 ++} ++ ++However, with a pretty printer gdb will output something like this: ++ ++(gdb) print mutex ++$1 = pthread_mutex_t = { ++ Type = Normal, ++ Status = Unlocked, ++ Robust = No, ++ Shared = No, ++ Protocol = Priority protect, ++ Priority ceiling = 42 ++} ++ ++Before printing a value, gdb will first check if there's a pretty printer ++registered for it. If there is, it'll use it, otherwise it'll print the value ++as usual. Pretty printers can be registered in various ways; for our purposes ++we register them for the current objfile by calling ++gdb.printing.register_pretty_printer(). ++ ++Currently our printers are based on gdb.RegexpCollectionPrettyPrinter, which ++means they'll be triggered if the type of the variable we're printing matches ++a given regular expression. For example, MutexPrinter will be triggered if ++our variable's type matches the regexp '^pthread_mutex_t$'. ++ ++Besides the printers themselves, each module may have a constants file which the ++printers will import. These constants are generated from C headers during the ++build process, and need to be in the Python search path when loading the ++printers. 
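As a compressed illustration of the registration mechanism the README describes (a deliberately tiny toy with a made-up summary format, not the printers shipped in nptl/nptl-printers.py), the whole pattern fits in a few lines of gdb Python:

# Toy regexp-based pretty printer; hypothetical, for illustration only.
import gdb
import gdb.printing

class ToyMutexPrinter(object):
    """Summarize a pthread_mutex_t instead of dumping the raw union."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        lock = self.val['__data']['__lock']
        return 'pthread_mutex_t (%s)' % ('Unlocked' if lock == 0 else 'Locked')

pp = gdb.printing.RegexpCollectionPrettyPrinter('toy-pthread-locks')
pp.add_printer('pthread_mutex_t', r'^pthread_mutex_t$', ToyMutexPrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile() or gdb, pp)

The real printers differ mainly in that they also implement children() to return the decoded fields, and in that they import the generated constants module instead of hard-coding magic numbers.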
++ ++ ++Installing and loading ++---------------------- ++ ++The pretty printers and their constant files may be installed in different paths ++for each distro, though gdb should be able to automatically load them by itself. ++When in doubt, you can use the 'info pretty-printer' gdb command to list the ++loaded pretty printers. ++ ++If the printers aren't automatically loaded for some reason, you should add the ++following to your .gdbinit: ++ ++python ++import sys ++sys.path.insert(0, '/path/to/constants/file/directory') ++end ++ ++source /path/to/printers.py ++ ++If you're building glibc manually, '/path/to/constants/file/directory' should be ++'/path/to/glibc-build/submodule', where 'submodule' is e.g. nptl. ++ ++ ++Testing ++------- ++ ++The pretty printers come with a small test suite based on PExpect, which is a ++Python module with Expect-like features for spawning and controlling interactive ++programs. Each printer has a corresponding C program and a Python script ++that uses PExpect to drive gdb through the program and compare its output to ++the expected printer's. ++ ++The tests run on the glibc host, which is assumed to have both gdb and PExpect; ++if any of those is absent the tests will fail with code 77 (UNSUPPORTED). ++Native builds can be tested simply by doing 'make check'; cross builds must use ++cross-test-ssh.sh as test-wrapper, like this: ++ ++make test-wrapper='/path/to/scripts/cross-test-ssh.sh user@host' check ++ ++(Remember to share the build system's filesystem with the glibc host's through ++NFS or something similar). ++ ++Running 'make check' on a cross build will only compile the test programs, ++without running the scripts. ++ ++ ++Adding new pretty printers ++-------------------------- ++ ++Adding new pretty printers to glibc requires following these steps: ++ ++1. Identify which constants must be generated from C headers, and write the ++corresponding .pysym file. See scripts/gen-py-const.awk for more information ++on how this works. The name of the .pysym file must be added to the ++'gen-py-const-headers' variable in your submodule's Makefile (without the .pysym ++extension). ++ ++2. Write the pretty printer code itself. For this you can follow the gdb ++Python API documentation, and use the existing printers as examples. The printer ++code must import the generated constants file (which will have the same name ++as your .pysym file). The names of the pretty printer files must be added ++to the 'pretty-printers' variable in your submodule's Makefile (without the .py ++extension). ++ ++3. Write the unit tests for your pretty printers. The build system calls each ++test script passing it the paths to the test program source, the test program ++binary, and the printer files you added to 'pretty-printers' in the previous ++step. The test scripts, in turn, must import scripts/test_printers_common ++and call the init_test function passing it, among other things, the name of the ++set of pretty printers to enable (as seen by running 'info pretty-printer'). ++You can use the existing unit tests as examples. ++ ++4. Add the names of the pretty printer tests to the 'tests-printers' variable ++in your submodule's Makefile (without extensions). In addition, for each test ++program you must define a corresponding CFLAGS-* variable and set it to ++$(CFLAGS-printers-tests) to ensure they're compiled correctly. 
For example, ++test-foo-printer.c requires the following: ++ ++CFLAGS-test-foo-printer.c := $(CFLAGS-printers-tests) ++ ++Finally, if your programs need to be linked with a specific library, you can add ++its name to the 'tests-printers-libs' variable in your submodule's Makefile. ++ ++ ++Known issues ++------------ ++ ++* Pretty printers are inherently coupled to the code they're targetting, thus ++any changes to the target code must also update the corresponding printers. ++On the plus side, the printer code itself may serve as a kind of documentation ++for the target code. ++ ++* Older versions of the gdb Python API have a bug where ++gdb.RegexpCollectionPrettyPrinter would not be able to get a value's real type ++if it was typedef'd. This would cause gdb to ignore the pretty printers for ++types like pthread_mutex_t, which is defined as: ++ ++typedef union ++{ ++ ... ++} pthread_mutex_t; ++ ++This was fixed in commit 1b588015839caafc608a6944a78aea170f5fb2f6, and released ++as part of gdb 7.8. However, typedef'ing an already typedef'd type may cause ++a similar issue, e.g.: ++ ++typedef pthread_mutex_t mutex; ++mutex a_mutex; ++ ++Here, trying to print a_mutex won't trigger the pthread_mutex_t printer. ++ ++* The test programs must be compiled without optimizations. This is necessary ++because the test scripts rely on the C code structure being preserved when ++stepping through the programs. Things like aggressive instruction reordering ++or optimizing variables out may make this kind of testing impossible. +diff --git a/Rules b/Rules +index 8306d36..10a6479 100644 +--- a/Rules ++++ b/Rules +@@ -85,16 +85,27 @@ common-generated += dummy.o dummy.c + + .PHONY: others tests bench bench-build + ++# Test programs for the pretty printers. ++tests-printers-programs := $(addprefix $(objpfx),$(tests-printers)) ++ ++# .out files with the output of running the pretty printer tests. ++tests-printers-out := $(patsubst %,$(objpfx)%.out,$(tests-printers)) ++ + ifeq ($(build-programs),yes) + others: $(addprefix $(objpfx),$(others) $(sysdep-others) $(extra-objs)) + else + others: $(addprefix $(objpfx),$(extra-objs)) + endif ++ ++# Generate constant files for Python pretty printers if required. ++others: $(py-const) ++ + ifeq ($(run-built-tests),no) +-tests: $(addprefix $(objpfx),$(tests) $(test-srcs)) $(tests-special) ++tests: $(addprefix $(objpfx),$(tests) $(test-srcs)) $(tests-special) \ ++ $(tests-printers-programs) + xtests: tests $(xtests-special) + else +-tests: $(tests:%=$(objpfx)%.out) $(tests-special) ++tests: $(tests:%=$(objpfx)%.out) $(tests-special) $(tests-printers-out) + xtests: tests $(xtests:%=$(objpfx)%.out) $(xtests-special) + endif + +@@ -102,7 +113,8 @@ tests-special-notdir = $(patsubst $(objpfx)%, %, $(tests-special)) + xtests-special-notdir = $(patsubst $(objpfx)%, %, $(xtests-special)) + tests: + $(..)scripts/merge-test-results.sh -s $(objpfx) $(subdir) \ +- $(sort $(tests) $(tests-special-notdir:.out=)) \ ++ $(sort $(tests) $(tests-special-notdir:.out=) \ ++ $(tests-printers)) \ + > $(objpfx)subdir-tests.sum + xtests: + $(..)scripts/merge-test-results.sh -s $(objpfx) $(subdir) \ +@@ -212,6 +224,32 @@ endif + + endif # tests + ++ifneq "$(strip $(tests-printers))" "" ++# We're defining this here for now; later it'll be defined at configure time ++# inside Makeconfig. ++PYTHON := python ++ ++# Static pattern rule for building the test programs for the pretty printers. 
++$(tests-printers-programs): %: %.o $(tests-printers-libs) \ ++ $(sort $(filter $(common-objpfx)lib%,$(link-libc-static-tests))) \ ++ $(addprefix $(csu-objpfx),start.o) $(+preinit) $(+postinit) ++ $(+link-printers-tests) ++ ++# Add the paths to the generated constants file and test_common_printers.py ++# to PYTHONPATH so the test scripts can find them. ++py-env := PYTHONPATH=$(py-const-dir):$(..)scripts:$${PYTHONPATH} ++ ++# Static pattern rule that matches the test-* targets to their .c and .py ++# prerequisites. It'll run the corresponding test script for each test program ++# we compiled and place its output in the corresponding .out file. ++# The pretty printer files and test_common_printers.py must be present for all. ++$(tests-printers-out): $(objpfx)%.out: $(objpfx)% %.py %.c $(pretty-printers) \ ++ $(..)scripts/test_printers_common.py ++ $(test-wrapper-env) $(py-env) \ ++ $(PYTHON) $*.py $*.c $(objpfx)$* $(pretty-printers) > $@; \ ++ $(evaluate-test) ++endif ++ + + .PHONY: distclean realclean subdir_distclean subdir_realclean \ + subdir_clean subdir_mostlyclean subdir_testclean +diff --git a/manual/install.texi b/manual/install.texi +index 79ee45f..468479e 100644 +--- a/manual/install.texi ++++ b/manual/install.texi +@@ -256,6 +256,36 @@ occurred. You can specify @samp{stop-on-test-failure=y} when running + @code{make check} to make the test run stop and exit with an error + status immediately when a failure occurs. + ++The @glibcadj{} pretty printers come with their own set of scripts for testing, ++which run together with the rest of the testsuite through @code{make check}. ++These scripts require the following tools to run successfully: ++ ++@itemize @bullet ++@item ++Python 2.7.6/3.4.3 or later ++ ++Python is required for running the printers' test scripts. ++ ++@item PExpect 4.0 ++ ++The printer tests drive GDB through test programs and compare its output ++to the printers'. PExpect is used to capture the output of GDB, and should be ++compatible with the Python version in your system. ++ ++@item ++GDB 7.8 or later with support for Python 2.7.6/3.4.3 or later ++ ++GDB itself needs to be configured with Python support in order to use the ++pretty printers. Notice that your system having Python available doesn't imply ++that GDB supports it, nor that your system's Python and GDB's have the same ++version. ++@end itemize ++ ++@noindent ++If these tools are absent, the printer tests will report themselves as ++@code{UNSUPPORTED}. Notice that some of the printer tests require @theglibc{} ++to be compiled with debugging symbols. ++ + To format the @cite{GNU C Library Reference Manual} for printing, type + @w{@code{make dvi}}. You need a working @TeX{} installation to do + this. 
The distribution builds the on-line formatted version of the +diff --git a/nptl/Makefile b/nptl/Makefile +index 7dec4ed..49f6ba6 100644 +--- a/nptl/Makefile ++++ b/nptl/Makefile +@@ -308,6 +308,24 @@ gen-as-const-headers = pthread-errnos.sym \ + unwindbuf.sym \ + lowlevelrobustlock.sym pthread-pi-defines.sym + ++gen-py-const-headers := nptl_lock_constants.pysym ++pretty-printers := nptl-printers.py ++tests-printers := test-mutexattr-printers test-mutex-printers \ ++ test-condattr-printers test-cond-printers \ ++ test-rwlockattr-printers test-rwlock-printers ++ ++CFLAGS-test-mutexattr-printers.c := $(CFLAGS-printers-tests) ++CFLAGS-test-mutex-printers.c := $(CFLAGS-printers-tests) ++CFLAGS-test-condattr-printers.c := $(CFLAGS-printers-tests) ++CFLAGS-test-cond-printers.c := $(CFLAGS-printers-tests) ++CFLAGS-test-rwlockattr-printers.c := $(CFLAGS-printers-tests) ++CFLAGS-test-rwlock-printers.c := $(CFLAGS-printers-tests) ++ ++ifeq ($(build-shared),yes) ++tests-printers-libs := $(shared-thread-library) ++else ++tests-printers-libs := $(static-thread-library) ++endif + + LDFLAGS-pthread.so = -Wl,--enable-new-dtags,-z,nodelete,-z,initfirst + +diff --git a/nptl/nptl-printers.py b/nptl/nptl-printers.py +new file mode 100644 +index 0000000..e402f23 +--- /dev/null ++++ b/nptl/nptl-printers.py +@@ -0,0 +1,633 @@ ++# Pretty printers for the NPTL lock types. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++"""This file contains the gdb pretty printers for the following types: ++ ++ * pthread_mutex_t ++ * pthread_mutexattr_t ++ * pthread_cond_t ++ * pthread_condattr_t ++ * pthread_rwlock_t ++ * pthread_rwlockattr_t ++ ++You can check which printers are registered and enabled by issuing the ++'info pretty-printer' gdb command. Printers should trigger automatically when ++trying to print a variable of one of the types mentioned above. ++""" ++ ++from __future__ import print_function ++ ++import gdb ++import gdb.printing ++from nptl_lock_constants import * ++ ++MUTEX_TYPES = { ++ PTHREAD_MUTEX_NORMAL: ('Type', 'Normal'), ++ PTHREAD_MUTEX_RECURSIVE: ('Type', 'Recursive'), ++ PTHREAD_MUTEX_ERRORCHECK: ('Type', 'Error check'), ++ PTHREAD_MUTEX_ADAPTIVE_NP: ('Type', 'Adaptive') ++} ++ ++class MutexPrinter(object): ++ """Pretty printer for pthread_mutex_t.""" ++ ++ def __init__(self, mutex): ++ """Initialize the printer's internal data structures. ++ ++ Args: ++ mutex: A gdb.value representing a pthread_mutex_t. ++ """ ++ ++ data = mutex['__data'] ++ self.lock = data['__lock'] ++ self.count = data['__count'] ++ self.owner = data['__owner'] ++ self.kind = data['__kind'] ++ self.values = [] ++ self.read_values() ++ ++ def to_string(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_mutex_t. 
++ """ ++ ++ return 'pthread_mutex_t' ++ ++ def children(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_mutex_t. ++ """ ++ ++ return self.values ++ ++ def read_values(self): ++ """Read the mutex's info and store it in self.values. ++ ++ The data contained in self.values will be returned by the Iterator ++ created in self.children. ++ """ ++ ++ self.read_type() ++ self.read_status() ++ self.read_attributes() ++ self.read_misc_info() ++ ++ def read_type(self): ++ """Read the mutex's type.""" ++ ++ mutex_type = self.kind & PTHREAD_MUTEX_KIND_MASK ++ ++ # mutex_type must be casted to int because it's a gdb.Value ++ self.values.append(MUTEX_TYPES[int(mutex_type)]) ++ ++ def read_status(self): ++ """Read the mutex's status. ++ ++ For architectures which support lock elision, this method reads ++ whether the mutex appears as locked in memory (i.e. it may show it as ++ unlocked even after calling pthread_mutex_lock). ++ """ ++ ++ if self.kind == PTHREAD_MUTEX_DESTROYED: ++ self.values.append(('Status', 'Destroyed')) ++ elif self.kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP: ++ self.read_status_robust() ++ else: ++ self.read_status_no_robust() ++ ++ def read_status_robust(self): ++ """Read the status of a robust mutex. ++ ++ In glibc robust mutexes are implemented in a very different way than ++ non-robust ones. This method reads their locking status, ++ whether it may have waiters, their registered owner (if any), ++ whether the owner is alive or not, and the status of the state ++ they're protecting. ++ """ ++ ++ if self.lock == PTHREAD_MUTEX_UNLOCKED: ++ self.values.append(('Status', 'Unlocked')) ++ else: ++ if self.lock & FUTEX_WAITERS: ++ self.values.append(('Status', 'Locked, possibly with waiters')) ++ else: ++ self.values.append(('Status', ++ 'Locked, possibly with no waiters')) ++ ++ if self.lock & FUTEX_OWNER_DIED: ++ self.values.append(('Owner ID', '%d (dead)' % self.owner)) ++ else: ++ self.values.append(('Owner ID', self.lock & FUTEX_TID_MASK)) ++ ++ if self.owner == PTHREAD_MUTEX_INCONSISTENT: ++ self.values.append(('State protected by this mutex', ++ 'Inconsistent')) ++ elif self.owner == PTHREAD_MUTEX_NOTRECOVERABLE: ++ self.values.append(('State protected by this mutex', ++ 'Not recoverable')) ++ ++ def read_status_no_robust(self): ++ """Read the status of a non-robust mutex. ++ ++ Read info on whether the mutex is locked, if it may have waiters ++ and its owner (if any). 
++ """ ++ ++ lock_value = self.lock ++ ++ if self.kind & PTHREAD_MUTEX_PRIO_PROTECT_NP: ++ lock_value &= ~(PTHREAD_MUTEX_PRIO_CEILING_MASK) ++ ++ if lock_value == PTHREAD_MUTEX_UNLOCKED: ++ self.values.append(('Status', 'Unlocked')) ++ else: ++ if self.kind & PTHREAD_MUTEX_PRIO_INHERIT_NP: ++ waiters = self.lock & FUTEX_WAITERS ++ owner = self.lock & FUTEX_TID_MASK ++ else: ++ # Mutex protocol is PP or none ++ waiters = (self.lock != PTHREAD_MUTEX_LOCKED_NO_WAITERS) ++ owner = self.owner ++ ++ if waiters: ++ self.values.append(('Status', 'Locked, possibly with waiters')) ++ else: ++ self.values.append(('Status', ++ 'Locked, possibly with no waiters')) ++ ++ self.values.append(('Owner ID', owner)) ++ ++ def read_attributes(self): ++ """Read the mutex's attributes.""" ++ ++ if self.kind != PTHREAD_MUTEX_DESTROYED: ++ if self.kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP: ++ self.values.append(('Robust', 'Yes')) ++ else: ++ self.values.append(('Robust', 'No')) ++ ++ # In glibc, robust mutexes always have their pshared flag set to ++ # 'shared' regardless of what the pshared flag of their ++ # mutexattr was. Therefore a robust mutex will act as shared ++ # even if it was initialized with a 'private' mutexattr. ++ if self.kind & PTHREAD_MUTEX_PSHARED_BIT: ++ self.values.append(('Shared', 'Yes')) ++ else: ++ self.values.append(('Shared', 'No')) ++ ++ if self.kind & PTHREAD_MUTEX_PRIO_INHERIT_NP: ++ self.values.append(('Protocol', 'Priority inherit')) ++ elif self.kind & PTHREAD_MUTEX_PRIO_PROTECT_NP: ++ prio_ceiling = ((self.lock & PTHREAD_MUTEX_PRIO_CEILING_MASK) ++ >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT) ++ ++ self.values.append(('Protocol', 'Priority protect')) ++ self.values.append(('Priority ceiling', prio_ceiling)) ++ else: ++ # PTHREAD_PRIO_NONE ++ self.values.append(('Protocol', 'None')) ++ ++ def read_misc_info(self): ++ """Read miscellaneous info on the mutex. ++ ++ For now this reads the number of times a recursive mutex was locked ++ by the same thread. ++ """ ++ ++ mutex_type = self.kind & PTHREAD_MUTEX_KIND_MASK ++ ++ if mutex_type == PTHREAD_MUTEX_RECURSIVE and self.count > 1: ++ self.values.append(('Times locked recursively', self.count)) ++ ++class MutexAttributesPrinter(object): ++ """Pretty printer for pthread_mutexattr_t. ++ ++ In the NPTL this is a type that's always casted to struct pthread_mutexattr ++ which has a single 'mutexkind' field containing the actual attributes. ++ """ ++ ++ def __init__(self, mutexattr): ++ """Initialize the printer's internal data structures. ++ ++ Args: ++ mutexattr: A gdb.value representing a pthread_mutexattr_t. ++ """ ++ ++ self.values = [] ++ ++ try: ++ mutexattr_struct = gdb.lookup_type('struct pthread_mutexattr') ++ self.mutexattr = mutexattr.cast(mutexattr_struct)['mutexkind'] ++ self.read_values() ++ except gdb.error: ++ # libpthread doesn't have debug symbols, thus we can't find the ++ # real struct type. Just print the union members. ++ self.values.append(('__size', mutexattr['__size'])) ++ self.values.append(('__align', mutexattr['__align'])) ++ ++ def to_string(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_mutexattr_t. ++ """ ++ ++ return 'pthread_mutexattr_t' ++ ++ def children(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_mutexattr_t. ++ """ ++ ++ return self.values ++ ++ def read_values(self): ++ """Read the mutexattr's info and store it in self.values. 
++ ++ The data contained in self.values will be returned by the Iterator ++ created in self.children. ++ """ ++ ++ mutexattr_type = (self.mutexattr ++ & ~PTHREAD_MUTEXATTR_FLAG_BITS ++ & ~PTHREAD_MUTEX_NO_ELISION_NP) ++ ++ # mutexattr_type must be casted to int because it's a gdb.Value ++ self.values.append(MUTEX_TYPES[int(mutexattr_type)]) ++ ++ if self.mutexattr & PTHREAD_MUTEXATTR_FLAG_ROBUST: ++ self.values.append(('Robust', 'Yes')) ++ else: ++ self.values.append(('Robust', 'No')) ++ ++ if self.mutexattr & PTHREAD_MUTEXATTR_FLAG_PSHARED: ++ self.values.append(('Shared', 'Yes')) ++ else: ++ self.values.append(('Shared', 'No')) ++ ++ protocol = ((self.mutexattr & PTHREAD_MUTEXATTR_PROTOCOL_MASK) >> ++ PTHREAD_MUTEXATTR_PROTOCOL_SHIFT) ++ ++ if protocol == PTHREAD_PRIO_NONE: ++ self.values.append(('Protocol', 'None')) ++ elif protocol == PTHREAD_PRIO_INHERIT: ++ self.values.append(('Protocol', 'Priority inherit')) ++ elif protocol == PTHREAD_PRIO_PROTECT: ++ self.values.append(('Protocol', 'Priority protect')) ++ ++CLOCK_IDS = { ++ CLOCK_REALTIME: 'CLOCK_REALTIME', ++ CLOCK_MONOTONIC: 'CLOCK_MONOTONIC', ++ CLOCK_PROCESS_CPUTIME_ID: 'CLOCK_PROCESS_CPUTIME_ID', ++ CLOCK_THREAD_CPUTIME_ID: 'CLOCK_THREAD_CPUTIME_ID', ++ CLOCK_MONOTONIC_RAW: 'CLOCK_MONOTONIC_RAW', ++ CLOCK_REALTIME_COARSE: 'CLOCK_REALTIME_COARSE', ++ CLOCK_MONOTONIC_COARSE: 'CLOCK_MONOTONIC_COARSE' ++} ++ ++class ConditionVariablePrinter(object): ++ """Pretty printer for pthread_cond_t.""" ++ ++ def __init__(self, cond): ++ """Initialize the printer's internal data structures. ++ ++ Args: ++ cond: A gdb.value representing a pthread_cond_t. ++ """ ++ ++ # Since PTHREAD_COND_SHARED is an integer, we need to cast it to void * ++ # to be able to compare it to the condvar's __data.__mutex member. ++ # ++ # While it looks like self.shared_value should be a class variable, ++ # that would result in it having an incorrect size if we're loading ++ # these printers through .gdbinit for a 64-bit objfile in AMD64. ++ # This is because gdb initially assumes the pointer size to be 4 bytes, ++ # and only sets it to 8 after loading the 64-bit objfiles. Since ++ # .gdbinit runs before any objfiles are loaded, this would effectively ++ # make self.shared_value have a size of 4, thus breaking later ++ # comparisons with pointers whose types are looked up at runtime. ++ void_ptr_type = gdb.lookup_type('void').pointer() ++ self.shared_value = gdb.Value(PTHREAD_COND_SHARED).cast(void_ptr_type) ++ ++ data = cond['__data'] ++ self.total_seq = data['__total_seq'] ++ self.mutex = data['__mutex'] ++ self.nwaiters = data['__nwaiters'] ++ self.values = [] ++ ++ self.read_values() ++ ++ def to_string(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_cond_t. ++ """ ++ ++ return 'pthread_cond_t' ++ ++ def children(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_cond_t. ++ """ ++ ++ return self.values ++ ++ def read_values(self): ++ """Read the condvar's info and store it in self.values. ++ ++ The data contained in self.values will be returned by the Iterator ++ created in self.children. ++ """ ++ ++ self.read_status() ++ self.read_attributes() ++ self.read_mutex_info() ++ ++ def read_status(self): ++ """Read the status of the condvar. ++ ++ This method reads whether the condvar is destroyed and how many threads ++ are waiting for it. 
++ """ ++ ++ if self.total_seq == PTHREAD_COND_DESTROYED: ++ self.values.append(('Status', 'Destroyed')) ++ ++ self.values.append(('Threads waiting for this condvar', ++ self.nwaiters >> COND_NWAITERS_SHIFT)) ++ ++ def read_attributes(self): ++ """Read the condvar's attributes.""" ++ ++ clock_id = self.nwaiters & ((1 << COND_NWAITERS_SHIFT) - 1) ++ ++ # clock_id must be casted to int because it's a gdb.Value ++ self.values.append(('Clock ID', CLOCK_IDS[int(clock_id)])) ++ ++ shared = (self.mutex == self.shared_value) ++ ++ if shared: ++ self.values.append(('Shared', 'Yes')) ++ else: ++ self.values.append(('Shared', 'No')) ++ ++ def read_mutex_info(self): ++ """Read the data of the mutex this condvar is bound to. ++ ++ A pthread_cond_t's __data.__mutex member is a void * which ++ must be casted to pthread_mutex_t *. For shared condvars, this ++ member isn't recorded and has a special value instead. ++ """ ++ ++ if self.mutex and self.mutex != self.shared_value: ++ mutex_type = gdb.lookup_type('pthread_mutex_t') ++ mutex = self.mutex.cast(mutex_type.pointer()).dereference() ++ ++ self.values.append(('Mutex', mutex)) ++ ++class ConditionVariableAttributesPrinter(object): ++ """Pretty printer for pthread_condattr_t. ++ ++ In the NPTL this is a type that's always casted to struct pthread_condattr, ++ which has a single 'value' field containing the actual attributes. ++ """ ++ ++ def __init__(self, condattr): ++ """Initialize the printer's internal data structures. ++ ++ Args: ++ condattr: A gdb.value representing a pthread_condattr_t. ++ """ ++ ++ self.values = [] ++ ++ try: ++ condattr_struct = gdb.lookup_type('struct pthread_condattr') ++ self.condattr = condattr.cast(condattr_struct)['value'] ++ self.read_values() ++ except gdb.error: ++ # libpthread doesn't have debug symbols, thus we can't find the ++ # real struct type. Just print the union members. ++ self.values.append(('__size', condattr['__size'])) ++ self.values.append(('__align', condattr['__align'])) ++ ++ def to_string(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_condattr_t. ++ """ ++ ++ return 'pthread_condattr_t' ++ ++ def children(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_condattr_t. ++ """ ++ ++ return self.values ++ ++ def read_values(self): ++ """Read the condattr's info and store it in self.values. ++ ++ The data contained in self.values will be returned by the Iterator ++ created in self.children. ++ """ ++ ++ clock_id = self.condattr & ((1 << COND_NWAITERS_SHIFT) - 1) ++ ++ # clock_id must be casted to int because it's a gdb.Value ++ self.values.append(('Clock ID', CLOCK_IDS[int(clock_id)])) ++ ++ if self.condattr & 1: ++ self.values.append(('Shared', 'Yes')) ++ else: ++ self.values.append(('Shared', 'No')) ++ ++class RWLockPrinter(object): ++ """Pretty printer for pthread_rwlock_t.""" ++ ++ def __init__(self, rwlock): ++ """Initialize the printer's internal data structures. ++ ++ Args: ++ rwlock: A gdb.value representing a pthread_rwlock_t. ++ """ ++ ++ data = rwlock['__data'] ++ self.readers = data['__nr_readers'] ++ self.queued_readers = data['__nr_readers_queued'] ++ self.queued_writers = data['__nr_writers_queued'] ++ self.writer_id = data['__writer'] ++ self.shared = data['__shared'] ++ self.prefers_writers = data['__flags'] ++ self.values = [] ++ self.read_values() ++ ++ def to_string(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_rwlock_t. 
++ """ ++ ++ return 'pthread_rwlock_t' ++ ++ def children(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_rwlock_t. ++ """ ++ ++ return self.values ++ ++ def read_values(self): ++ """Read the rwlock's info and store it in self.values. ++ ++ The data contained in self.values will be returned by the Iterator ++ created in self.children. ++ """ ++ ++ self.read_status() ++ self.read_attributes() ++ ++ def read_status(self): ++ """Read the status of the rwlock.""" ++ ++ # Right now pthread_rwlock_destroy doesn't do anything, so there's no ++ # way to check if an rwlock is destroyed. ++ ++ if self.writer_id: ++ self.values.append(('Status', 'Locked (Write)')) ++ self.values.append(('Writer ID', self.writer_id)) ++ elif self.readers: ++ self.values.append(('Status', 'Locked (Read)')) ++ self.values.append(('Readers', self.readers)) ++ else: ++ self.values.append(('Status', 'Unlocked')) ++ ++ self.values.append(('Queued readers', self.queued_readers)) ++ self.values.append(('Queued writers', self.queued_writers)) ++ ++ def read_attributes(self): ++ """Read the attributes of the rwlock.""" ++ ++ if self.shared: ++ self.values.append(('Shared', 'Yes')) ++ else: ++ self.values.append(('Shared', 'No')) ++ ++ if self.prefers_writers: ++ self.values.append(('Prefers', 'Writers')) ++ else: ++ self.values.append(('Prefers', 'Readers')) ++ ++class RWLockAttributesPrinter(object): ++ """Pretty printer for pthread_rwlockattr_t. ++ ++ In the NPTL this is a type that's always casted to ++ struct pthread_rwlockattr, which has two fields ('lockkind' and 'pshared') ++ containing the actual attributes. ++ """ ++ ++ def __init__(self, rwlockattr): ++ """Initialize the printer's internal data structures. ++ ++ Args: ++ rwlockattr: A gdb.value representing a pthread_rwlockattr_t. ++ """ ++ ++ self.values = [] ++ ++ try: ++ rwlockattr_struct = gdb.lookup_type('struct pthread_rwlockattr') ++ self.rwlockattr = rwlockattr.cast(rwlockattr_struct) ++ self.read_values() ++ except gdb.error: ++ # libpthread doesn't have debug symbols, thus we can't find the ++ # real struct type. Just print the union members. ++ self.values.append(('__size', rwlockattr['__size'])) ++ self.values.append(('__align', rwlockattr['__align'])) ++ ++ def to_string(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_rwlockattr_t. ++ """ ++ ++ return 'pthread_rwlockattr_t' ++ ++ def children(self): ++ """gdb API function. ++ ++ This is called from gdb when we try to print a pthread_rwlockattr_t. ++ """ ++ ++ return self.values ++ ++ def read_values(self): ++ """Read the rwlockattr's info and store it in self.values. ++ ++ The data contained in self.values will be returned by the Iterator ++ created in self.children. ++ """ ++ ++ rwlock_type = self.rwlockattr['lockkind'] ++ shared = self.rwlockattr['pshared'] ++ ++ if shared == PTHREAD_PROCESS_SHARED: ++ self.values.append(('Shared', 'Yes')) ++ else: ++ # PTHREAD_PROCESS_PRIVATE ++ self.values.append(('Shared', 'No')) ++ ++ if (rwlock_type == PTHREAD_RWLOCK_PREFER_READER_NP or ++ rwlock_type == PTHREAD_RWLOCK_PREFER_WRITER_NP): ++ # This is a known bug. Using PTHREAD_RWLOCK_PREFER_WRITER_NP will ++ # still make the rwlock prefer readers. 
++ self.values.append(('Prefers', 'Readers')) ++ elif rwlock_type == PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP: ++ self.values.append(('Prefers', 'Writers')) ++ ++def register(objfile): ++ """Register the pretty printers within the given objfile.""" ++ ++ printer = gdb.printing.RegexpCollectionPrettyPrinter('glibc-pthread-locks') ++ ++ printer.add_printer('pthread_mutex_t', r'^pthread_mutex_t$', ++ MutexPrinter) ++ printer.add_printer('pthread_mutexattr_t', r'^pthread_mutexattr_t$', ++ MutexAttributesPrinter) ++ printer.add_printer('pthread_cond_t', r'^pthread_cond_t$', ++ ConditionVariablePrinter) ++ printer.add_printer('pthread_condattr_t', r'^pthread_condattr_t$', ++ ConditionVariableAttributesPrinter) ++ printer.add_printer('pthread_rwlock_t', r'^pthread_rwlock_t$', ++ RWLockPrinter) ++ printer.add_printer('pthread_rwlockattr_t', r'^pthread_rwlockattr_t$', ++ RWLockAttributesPrinter) ++ ++ if objfile == None: ++ objfile = gdb ++ ++ gdb.printing.register_pretty_printer(objfile, printer) ++ ++register(gdb.current_objfile()) +diff --git a/nptl/nptl_lock_constants.pysym b/nptl/nptl_lock_constants.pysym +new file mode 100644 +index 0000000..303ec61 +--- /dev/null ++++ b/nptl/nptl_lock_constants.pysym +@@ -0,0 +1,75 @@ ++#include <pthreadP.h> ++ ++-- Mutex types ++PTHREAD_MUTEX_KIND_MASK PTHREAD_MUTEX_KIND_MASK_NP ++PTHREAD_MUTEX_NORMAL ++PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP ++PTHREAD_MUTEX_ERRORCHECK PTHREAD_MUTEX_ERRORCHECK_NP ++PTHREAD_MUTEX_ADAPTIVE_NP ++ ++-- Mutex status ++-- These are hardcoded all over the code; there are no enums/macros for them. ++PTHREAD_MUTEX_DESTROYED -1 ++PTHREAD_MUTEX_UNLOCKED 0 ++PTHREAD_MUTEX_LOCKED_NO_WAITERS 1 ++ ++-- For robust mutexes ++PTHREAD_MUTEX_INCONSISTENT ++PTHREAD_MUTEX_NOTRECOVERABLE ++FUTEX_OWNER_DIED ++ ++-- For robust and PI mutexes ++FUTEX_WAITERS ++FUTEX_TID_MASK ++ ++-- Mutex attributes ++PTHREAD_MUTEX_ROBUST_NORMAL_NP ++PTHREAD_MUTEX_PRIO_INHERIT_NP ++PTHREAD_MUTEX_PRIO_PROTECT_NP ++PTHREAD_MUTEX_PSHARED_BIT ++PTHREAD_MUTEX_PRIO_CEILING_SHIFT ++PTHREAD_MUTEX_PRIO_CEILING_MASK ++ ++-- Mutex attribute flags ++PTHREAD_MUTEXATTR_PROTOCOL_SHIFT ++PTHREAD_MUTEXATTR_PROTOCOL_MASK ++PTHREAD_MUTEXATTR_PRIO_CEILING_MASK ++PTHREAD_MUTEXATTR_FLAG_ROBUST ++PTHREAD_MUTEXATTR_FLAG_PSHARED ++PTHREAD_MUTEXATTR_FLAG_BITS ++PTHREAD_MUTEX_NO_ELISION_NP ++ ++-- Priority protocols ++PTHREAD_PRIO_NONE ++PTHREAD_PRIO_INHERIT ++PTHREAD_PRIO_PROTECT ++ ++-- These values are hardcoded as well: ++-- Value of __mutex for shared condvars. ++PTHREAD_COND_SHARED (void *)~0l ++ ++-- Value of __total_seq for destroyed condvars. ++PTHREAD_COND_DESTROYED -1ull ++ ++-- __nwaiters encodes the number of threads waiting on a condvar ++-- and the clock ID. ++-- __nwaiters >> COND_NWAITERS_SHIFT gives us the number of waiters. ++COND_NWAITERS_SHIFT ++ ++-- Condvar clock IDs ++CLOCK_REALTIME ++CLOCK_MONOTONIC ++CLOCK_PROCESS_CPUTIME_ID ++CLOCK_THREAD_CPUTIME_ID ++CLOCK_MONOTONIC_RAW ++CLOCK_REALTIME_COARSE ++CLOCK_MONOTONIC_COARSE ++ ++-- Rwlock attributes ++PTHREAD_RWLOCK_PREFER_READER_NP ++PTHREAD_RWLOCK_PREFER_WRITER_NP ++PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP ++ ++-- 'Shared' attribute values ++PTHREAD_PROCESS_PRIVATE ++PTHREAD_PROCESS_SHARED +diff --git a/nptl/test-cond-printers.c b/nptl/test-cond-printers.c +new file mode 100644 +index 0000000..0f2a5f4 +--- /dev/null ++++ b/nptl/test-cond-printers.c +@@ -0,0 +1,57 @@ ++/* Helper program for testing the pthread_cond_t pretty printer. ++ ++ Copyright (C) 2016 Free Software Foundation, Inc. 
++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++/* Keep the calls to the pthread_* functions on separate lines to make it easy ++ to advance through the program using the gdb 'next' command. */ ++ ++#include <time.h> ++#include <pthread.h> ++ ++#define PASS 0 ++#define FAIL 1 ++ ++static int test_status_destroyed (pthread_cond_t *condvar); ++ ++int ++main (void) ++{ ++ pthread_cond_t condvar; ++ pthread_condattr_t attr; ++ int result = FAIL; ++ ++ if (pthread_condattr_init (&attr) == 0 ++ && test_status_destroyed (&condvar) == PASS) ++ result = PASS; ++ /* Else, one of the pthread_cond* functions failed. */ ++ ++ return result; ++} ++ ++/* Initializes CONDVAR, then destroys it. */ ++static int ++test_status_destroyed (pthread_cond_t *condvar) ++{ ++ int result = FAIL; ++ ++ if (pthread_cond_init (condvar, NULL) == 0 ++ && pthread_cond_destroy (condvar) == 0) ++ result = PASS; /* Test status (destroyed). */ ++ ++ return result; ++} +diff --git a/nptl/test-cond-printers.py b/nptl/test-cond-printers.py +new file mode 100644 +index 0000000..af0e12e +--- /dev/null ++++ b/nptl/test-cond-printers.py +@@ -0,0 +1,50 @@ ++# Common tests for the ConditionVariablePrinter class. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. 
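The condvar printers above, together with the __nwaiters, PTHREAD_COND_SHARED and PTHREAD_COND_DESTROYED entries in nptl_lock_constants.pysym, all rely on one packed encoding: the clock ID lives in the low COND_NWAITERS_SHIFT bits of __nwaiters, the waiter count in the remaining bits, and shared or destroyed condvars are recognized through sentinel values of __mutex and __total_seq. A minimal decoding sketch, not part of the patch; the helper name is mine, and the shift and sentinel arguments are parameters because their real values come from the generated constants:

def decode_condvar(nwaiters, total_seq, mutex,
                   nwaiters_shift, shared_value, destroyed_value):
    """Return (status, clock_id, waiters, shared) from a condvar's raw fields."""
    clock_id = nwaiters & ((1 << nwaiters_shift) - 1)  # low bits: clock ID
    waiters = nwaiters >> nwaiters_shift               # remaining bits: waiter count
    status = 'Destroyed' if total_seq == destroyed_value else 'Live'
    shared = (mutex == shared_value)                   # shared condvars store a sentinel, not a mutex
    return status, clock_id, waiters, shared

# Illustrative raw values only: five waiters, clock bit set, not shared.
print(decode_condvar(nwaiters=(5 << 1) | 1, total_seq=0, mutex=0,
                     nwaiters_shift=1, shared_value=~0, destroyed_value=-1))
# -> ('Live', 1, 5, False)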
++ ++import sys ++ ++from test_printers_common import * ++ ++test_source = sys.argv[1] ++test_bin = sys.argv[2] ++printer_files = sys.argv[3:] ++printer_names = ['global glibc-pthread-locks'] ++ ++try: ++ init_test(test_bin, printer_files, printer_names) ++ go_to_main() ++ ++ var = 'condvar' ++ to_string = 'pthread_cond_t' ++ ++ break_at(test_source, 'Test status (destroyed)') ++ continue_cmd() # Go to test_status_destroyed ++ test_printer(var, to_string, {'Status': 'Destroyed'}) ++ ++ continue_cmd() # Exit ++ ++except (NoLineError, pexpect.TIMEOUT) as exception: ++ print('Error: {0}'.format(exception)) ++ result = FAIL ++ ++else: ++ print('Test succeeded.') ++ result = PASS ++ ++exit(result) +diff --git a/nptl/test-condattr-printers.c b/nptl/test-condattr-printers.c +new file mode 100644 +index 0000000..4db4098 +--- /dev/null ++++ b/nptl/test-condattr-printers.c +@@ -0,0 +1,94 @@ ++/* Helper program for testing the pthread_cond_t and pthread_condattr_t ++ pretty printers. ++ ++ Copyright (C) 2016 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++/* Keep the calls to the pthread_* functions on separate lines to make it easy ++ to advance through the program using the gdb 'next' command. */ ++ ++#include <time.h> ++#include <pthread.h> ++ ++#define PASS 0 ++#define FAIL 1 ++ ++static int condvar_reinit (pthread_cond_t *condvar, ++ const pthread_condattr_t *attr); ++static int test_setclock (pthread_cond_t *condvar, pthread_condattr_t *attr); ++static int test_setpshared (pthread_cond_t *condvar, pthread_condattr_t *attr); ++ ++/* Need these so we don't have lines longer than 79 chars. */ ++#define SET_SHARED(attr, shared) pthread_condattr_setpshared (attr, shared) ++ ++int ++main (void) ++{ ++ pthread_cond_t condvar; ++ pthread_condattr_t attr; ++ int result = FAIL; ++ ++ if (pthread_condattr_init (&attr) == 0 ++ && pthread_cond_init (&condvar, NULL) == 0 ++ && test_setclock (&condvar, &attr) == PASS ++ && test_setpshared (&condvar, &attr) == PASS) ++ result = PASS; ++ /* Else, one of the pthread_cond* functions failed. */ ++ ++ return result; ++} ++ ++/* Destroys CONDVAR and re-initializes it using ATTR. */ ++static int ++condvar_reinit (pthread_cond_t *condvar, const pthread_condattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (pthread_cond_destroy (condvar) == 0 ++ && pthread_cond_init (condvar, attr) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting the clock ID attribute. */ ++static int ++test_setclock (pthread_cond_t *condvar, pthread_condattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (pthread_condattr_setclock (attr, CLOCK_REALTIME) == 0 /* Set clock. */ ++ && condvar_reinit (condvar, attr) == PASS) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting whether the condvar can be shared between processes. 
*/ ++static int ++test_setpshared (pthread_cond_t *condvar, pthread_condattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (SET_SHARED (attr, PTHREAD_PROCESS_SHARED) == 0 /* Set shared. */ ++ && condvar_reinit (condvar, attr) == PASS ++ && SET_SHARED (attr, PTHREAD_PROCESS_PRIVATE) == 0 ++ && condvar_reinit (condvar, attr) == PASS) ++ result = PASS; ++ ++ return result; ++} +diff --git a/nptl/test-condattr-printers.py b/nptl/test-condattr-printers.py +new file mode 100644 +index 0000000..7ea01db +--- /dev/null ++++ b/nptl/test-condattr-printers.py +@@ -0,0 +1,71 @@ ++# Common tests for the ConditionVariablePrinter and ++# ConditionVariableAttributesPrinter classes. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++import sys ++ ++from test_printers_common import * ++ ++test_source = sys.argv[1] ++test_bin = sys.argv[2] ++printer_files = sys.argv[3:] ++printer_names = ['global glibc-pthread-locks'] ++ ++try: ++ init_test(test_bin, printer_files, printer_names) ++ go_to_main() ++ ++ check_debug_symbol('struct pthread_condattr') ++ ++ condvar_var = 'condvar' ++ condvar_to_string = 'pthread_cond_t' ++ ++ attr_var = 'attr' ++ attr_to_string = 'pthread_condattr_t' ++ ++ break_at(test_source, 'Set clock') ++ continue_cmd() # Go to test_setclock ++ next_cmd(2) ++ test_printer(condvar_var, condvar_to_string, {'Clock ID': 'CLOCK_REALTIME'}) ++ test_printer(attr_var, attr_to_string, {'Clock ID': 'CLOCK_REALTIME'}) ++ ++ break_at(test_source, 'Set shared') ++ continue_cmd() # Go to test_setpshared ++ next_cmd(2) ++ test_printer(condvar_var, condvar_to_string, {'Shared': 'Yes'}) ++ test_printer(attr_var, attr_to_string, {'Shared': 'Yes'}) ++ next_cmd(2) ++ test_printer(condvar_var, condvar_to_string, {'Shared': 'No'}) ++ test_printer(attr_var, attr_to_string, {'Shared': 'No'}) ++ ++ continue_cmd() # Exit ++ ++except (NoLineError, pexpect.TIMEOUT) as exception: ++ print('Error: {0}'.format(exception)) ++ result = FAIL ++ ++except DebugError as exception: ++ print(exception) ++ result = UNSUPPORTED ++ ++else: ++ print('Test succeeded.') ++ result = PASS ++ ++exit(result) +diff --git a/nptl/test-mutex-printers.c b/nptl/test-mutex-printers.c +new file mode 100644 +index 0000000..b973e82 +--- /dev/null ++++ b/nptl/test-mutex-printers.c +@@ -0,0 +1,151 @@ ++/* Helper program for testing the pthread_mutex_t pretty printer. ++ ++ Copyright (C) 2016 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. 
++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++/* Keep the calls to the pthread_* functions on separate lines to make it easy ++ to advance through the program using the gdb 'next' command. */ ++ ++#include <stdlib.h> ++#include <errno.h> ++#include <pthread.h> ++ ++#define PASS 0 ++#define FAIL 1 ++ ++static int test_status_destroyed (pthread_mutex_t *mutex); ++static int test_status_no_robust (pthread_mutex_t *mutex, ++ pthread_mutexattr_t *attr); ++static int test_status_robust (pthread_mutex_t *mutex, ++ pthread_mutexattr_t *attr); ++static int test_locking_state_robust (pthread_mutex_t *mutex); ++static void *thread_func (void *arg); ++static int test_recursive_locks (pthread_mutex_t *mutex, ++ pthread_mutexattr_t *attr); ++ ++int ++main (void) ++{ ++ pthread_mutex_t mutex; ++ pthread_mutexattr_t attr; ++ int result = FAIL; ++ ++ if (pthread_mutexattr_init (&attr) == 0 ++ && test_status_destroyed (&mutex) == PASS ++ && test_status_no_robust (&mutex, &attr) == PASS ++ && test_status_robust (&mutex, &attr) == PASS ++ && test_recursive_locks (&mutex, &attr) == PASS) ++ result = PASS; ++ /* Else, one of the pthread_mutex* functions failed. */ ++ ++ return result; ++} ++ ++/* Initializes MUTEX, then destroys it. */ ++static int ++test_status_destroyed (pthread_mutex_t *mutex) ++{ ++ int result = FAIL; ++ ++ if (pthread_mutex_init (mutex, NULL) == 0 ++ && pthread_mutex_destroy (mutex) == 0) ++ result = PASS; /* Test status (destroyed). */ ++ ++ return result; ++} ++ ++/* Tests locking of non-robust mutexes. */ ++static int ++test_status_no_robust (pthread_mutex_t *mutex, pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (pthread_mutexattr_setrobust (attr, PTHREAD_MUTEX_STALLED) == 0 ++ && pthread_mutex_init (mutex, attr) == 0 ++ && pthread_mutex_lock (mutex) == 0 /* Test status (non-robust). */ ++ && pthread_mutex_unlock (mutex) == 0 ++ && pthread_mutex_destroy (mutex) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests locking of robust mutexes. */ ++static int ++test_status_robust (pthread_mutex_t *mutex, pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (pthread_mutexattr_setrobust (attr, PTHREAD_MUTEX_ROBUST) == 0 ++ && pthread_mutex_init (mutex, attr) == 0 ++ && test_locking_state_robust (mutex) == PASS /* Test status (robust). */ ++ && pthread_mutex_destroy (mutex) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests locking and state corruption of robust mutexes. We'll mark it as ++ inconsistent, then not recoverable. */ ++static int ++test_locking_state_robust (pthread_mutex_t *mutex) ++{ ++ int result = FAIL; ++ pthread_t thread; ++ ++ if (pthread_create (&thread, NULL, thread_func, mutex) == 0 /* Create. */ ++ && pthread_join (thread, NULL) == 0 ++ && pthread_mutex_lock (mutex) == EOWNERDEAD /* Test locking (robust). */ ++ && pthread_mutex_unlock (mutex) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Function to be called by the child thread when testing robust mutexes. */ ++static void * ++thread_func (void *arg) ++{ ++ pthread_mutex_t *mutex = (pthread_mutex_t *)arg; ++ ++ if (pthread_mutex_lock (mutex) != 0) /* Thread function. 
*/ ++ exit (FAIL); ++ ++ /* Thread terminates without unlocking the mutex, thus marking it as ++ inconsistent. */ ++ return NULL; ++} ++ ++/* Tests locking the mutex multiple times in a row. */ ++static int ++test_recursive_locks (pthread_mutex_t *mutex, pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (pthread_mutexattr_settype (attr, PTHREAD_MUTEX_RECURSIVE) == 0 ++ && pthread_mutex_init (mutex, attr) == 0 ++ && pthread_mutex_lock (mutex) == 0 ++ && pthread_mutex_lock (mutex) == 0 ++ && pthread_mutex_lock (mutex) == 0 /* Test recursive locks. */ ++ && pthread_mutex_unlock (mutex) == 0 ++ && pthread_mutex_unlock (mutex) == 0 ++ && pthread_mutex_unlock (mutex) == 0 ++ && pthread_mutex_destroy (mutex) == 0) ++ result = PASS; ++ ++ return result; ++} +diff --git a/nptl/test-mutex-printers.py b/nptl/test-mutex-printers.py +new file mode 100644 +index 0000000..7f542ad +--- /dev/null ++++ b/nptl/test-mutex-printers.py +@@ -0,0 +1,97 @@ ++# Tests for the MutexPrinter class. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++import sys ++ ++from test_printers_common import * ++ ++test_source = sys.argv[1] ++test_bin = sys.argv[2] ++printer_files = sys.argv[3:] ++printer_names = ['global glibc-pthread-locks'] ++ ++try: ++ init_test(test_bin, printer_files, printer_names) ++ go_to_main() ++ ++ var = 'mutex' ++ to_string = 'pthread_mutex_t' ++ ++ break_at(test_source, 'Test status (destroyed)') ++ continue_cmd() # Go to test_status_destroyed ++ test_printer(var, to_string, {'Status': 'Destroyed'}) ++ ++ break_at(test_source, 'Test status (non-robust)') ++ continue_cmd() # Go to test_status_no_robust ++ test_printer(var, to_string, {'Status': 'Unlocked'}) ++ next_cmd() ++ thread_id = get_current_thread_lwpid() ++ test_printer(var, to_string, {'Status': 'Locked, possibly with no waiters', ++ 'Owner ID': thread_id}) ++ ++ break_at(test_source, 'Test status (robust)') ++ continue_cmd() # Go to test_status_robust ++ test_printer(var, to_string, {'Status': 'Unlocked'}) ++ ++ # We'll now test the robust mutex locking states. We'll create a new ++ # thread that will lock a robust mutex and exit without unlocking it. ++ break_at(test_source, 'Create') ++ continue_cmd() # Go to test_locking_state_robust ++ # Set a breakpoint for the new thread to hit. ++ break_at(test_source, 'Thread function') ++ continue_cmd() ++ # By now the new thread is created and has hit its breakpoint. ++ set_scheduler_locking(True) ++ parent = 1 ++ child = 2 ++ select_thread(child) ++ child_id = get_current_thread_lwpid() ++ # We've got the new thread's ID. ++ select_thread(parent) ++ # Make the new thread finish its function while we wait. ++ continue_cmd(thread=child) ++ # The new thread should be dead by now. 
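The robust-mutex checks in this script depend on how a robust mutex encodes its state in the futex word: the owner's TID occupies the FUTEX_TID_MASK bits, while FUTEX_OWNER_DIED and FUTEX_WAITERS are flag bits; these are the constants exported through nptl_lock_constants.pysym above. A small decoding sketch, not part of the patch (the function name is mine, and the numeric values are the usual Linux robust-futex ones, written out here only for illustration):

FUTEX_WAITERS    = 0x80000000
FUTEX_OWNER_DIED = 0x40000000
FUTEX_TID_MASK   = 0x3fffffff

def decode_robust_lock(lock_word):
    """Split a robust mutex's __lock word into (owner_tid, owner_died, has_waiters)."""
    return (lock_word & FUTEX_TID_MASK,
            bool(lock_word & FUTEX_OWNER_DIED),
            bool(lock_word & FUTEX_WAITERS))

# A thread with TID 1234 died while holding the mutex and another thread is
# now blocked on it; the printer reports this as an owner ID marked "(dead)".
print(decode_robust_lock(1234 | FUTEX_OWNER_DIED | FUTEX_WAITERS))
# -> (1234, True, True)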
++ break_at(test_source, 'Test locking (robust)') ++ continue_cmd() ++ test_printer(var, to_string, {'Owner ID': r'{0} \(dead\)'.format(child_id)}) ++ # Try to lock and unlock the mutex. ++ next_cmd() ++ test_printer(var, to_string, {'Owner ID': thread_id, ++ 'State protected by this mutex': 'Inconsistent'}) ++ next_cmd() ++ test_printer(var, to_string, {'Status': 'Unlocked', ++ 'State protected by this mutex': 'Not recoverable'}) ++ set_scheduler_locking(False) ++ ++ break_at(test_source, 'Test recursive locks') ++ continue_cmd() # Go to test_recursive_locks ++ test_printer(var, to_string, {'Times locked recursively': '2'}) ++ next_cmd() ++ test_printer(var, to_string, {'Times locked recursively': '3'}) ++ continue_cmd() # Exit ++ ++except (NoLineError, pexpect.TIMEOUT) as exception: ++ print('Error: {0}'.format(exception)) ++ result = FAIL ++ ++else: ++ print('Test succeeded.') ++ result = PASS ++ ++exit(result) +diff --git a/nptl/test-mutexattr-printers.c b/nptl/test-mutexattr-printers.c +new file mode 100644 +index 0000000..9ecfff7 +--- /dev/null ++++ b/nptl/test-mutexattr-printers.c +@@ -0,0 +1,144 @@ ++/* Helper program for testing the pthread_mutex_t and pthread_mutexattr_t ++ pretty printers. ++ ++ Copyright (C) 2016 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++/* Keep the calls to the pthread_* functions on separate lines to make it easy ++ to advance through the program using the gdb 'next' command. */ ++ ++#include <pthread.h> ++ ++#define PASS 0 ++#define FAIL 1 ++#define PRIOCEILING 42 ++ ++/* Need these so we don't have lines longer than 79 chars. 
*/ ++#define SET_TYPE(attr, type) pthread_mutexattr_settype (attr, type) ++#define SET_ROBUST(attr, robust) pthread_mutexattr_setrobust (attr, robust) ++#define SET_SHARED(attr, shared) pthread_mutexattr_setpshared (attr, shared) ++#define SET_PROTOCOL(attr, protocol) \ ++ pthread_mutexattr_setprotocol (attr, protocol) ++#define SET_PRIOCEILING(mutex, prioceiling, old_ceiling) \ ++ pthread_mutex_setprioceiling (mutex, prioceiling, old_ceiling) ++ ++static int mutex_reinit (pthread_mutex_t *mutex, ++ const pthread_mutexattr_t *attr); ++static int test_settype (pthread_mutex_t *mutex, pthread_mutexattr_t *attr); ++static int test_setrobust (pthread_mutex_t *mutex, pthread_mutexattr_t *attr); ++static int test_setpshared (pthread_mutex_t *mutex, pthread_mutexattr_t *attr); ++static int test_setprotocol (pthread_mutex_t *mutex, ++ pthread_mutexattr_t *attr); ++ ++int ++main (void) ++{ ++ pthread_mutex_t mutex; ++ pthread_mutexattr_t attr; ++ int result = FAIL; ++ ++ if (pthread_mutexattr_init (&attr) == 0 ++ && pthread_mutex_init (&mutex, NULL) == 0 ++ && test_settype (&mutex, &attr) == PASS ++ && test_setrobust (&mutex, &attr) == PASS ++ && test_setpshared (&mutex, &attr) == PASS ++ && test_setprotocol (&mutex, &attr) == PASS) ++ result = PASS; ++ /* Else, one of the pthread_mutex* functions failed. */ ++ ++ return result; ++} ++ ++/* Destroys MUTEX and re-initializes it using ATTR. */ ++static int ++mutex_reinit (pthread_mutex_t *mutex, const pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (pthread_mutex_destroy (mutex) == 0 ++ && pthread_mutex_init (mutex, attr) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting the mutex type. */ ++static int ++test_settype (pthread_mutex_t *mutex, pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (SET_TYPE (attr, PTHREAD_MUTEX_ERRORCHECK) == 0 /* Set type. */ ++ && mutex_reinit (mutex, attr) == 0 ++ && SET_TYPE (attr, PTHREAD_MUTEX_RECURSIVE) == 0 ++ && mutex_reinit (mutex, attr) == 0 ++ && SET_TYPE (attr, PTHREAD_MUTEX_NORMAL) == 0 ++ && mutex_reinit (mutex, attr) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting whether the mutex is robust. */ ++static int ++test_setrobust (pthread_mutex_t *mutex, pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (SET_ROBUST (attr, PTHREAD_MUTEX_ROBUST) == 0 /* Set robust. */ ++ && mutex_reinit (mutex, attr) == 0 ++ && SET_ROBUST (attr, PTHREAD_MUTEX_STALLED) == 0 ++ && mutex_reinit (mutex, attr) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting whether the mutex can be shared between processes. */ ++static int ++test_setpshared (pthread_mutex_t *mutex, pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (SET_SHARED (attr, PTHREAD_PROCESS_SHARED) == 0 /* Set shared. */ ++ && mutex_reinit (mutex, attr) == 0 ++ && SET_SHARED (attr, PTHREAD_PROCESS_PRIVATE) == 0 ++ && mutex_reinit (mutex, attr) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting the mutex protocol and, for Priority Protect, the Priority ++ Ceiling. */ ++static int ++test_setprotocol (pthread_mutex_t *mutex, pthread_mutexattr_t *attr) ++{ ++ int result = FAIL; ++ int old_prioceiling; ++ ++ if (SET_PROTOCOL (attr, PTHREAD_PRIO_INHERIT) == 0 /* Set protocol. 
*/ ++ && mutex_reinit (mutex, attr) == 0 ++ && SET_PROTOCOL (attr, PTHREAD_PRIO_PROTECT) == 0 ++ && mutex_reinit (mutex, attr) == 0 ++ && SET_PRIOCEILING(mutex, PRIOCEILING, &old_prioceiling) == 0 ++ && SET_PROTOCOL (attr, PTHREAD_PRIO_NONE) == 0 ++ && mutex_reinit (mutex, attr) == 0) ++ result = PASS; ++ ++ return result; ++} +diff --git a/nptl/test-mutexattr-printers.py b/nptl/test-mutexattr-printers.py +new file mode 100644 +index 0000000..4464723 +--- /dev/null ++++ b/nptl/test-mutexattr-printers.py +@@ -0,0 +1,101 @@ ++# Common tests for the MutexPrinter and MutexAttributesPrinter classes. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++import sys ++ ++from test_printers_common import * ++ ++test_source = sys.argv[1] ++test_bin = sys.argv[2] ++printer_files = sys.argv[3:] ++printer_names = ['global glibc-pthread-locks'] ++PRIOCEILING = 42 ++ ++try: ++ init_test(test_bin, printer_files, printer_names) ++ go_to_main() ++ ++ check_debug_symbol('struct pthread_mutexattr') ++ ++ mutex_var = 'mutex' ++ mutex_to_string = 'pthread_mutex_t' ++ ++ attr_var = 'attr' ++ attr_to_string = 'pthread_mutexattr_t' ++ ++ break_at(test_source, 'Set type') ++ continue_cmd() # Go to test_settype ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Type': 'Error check'}) ++ test_printer(mutex_var, mutex_to_string, {'Type': 'Error check'}) ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Type': 'Recursive'}) ++ test_printer(mutex_var, mutex_to_string, {'Type': 'Recursive'}) ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Type': 'Normal'}) ++ test_printer(mutex_var, mutex_to_string, {'Type': 'Normal'}) ++ ++ break_at(test_source, 'Set robust') ++ continue_cmd() # Go to test_setrobust ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Robust': 'Yes'}) ++ test_printer(mutex_var, mutex_to_string, {'Robust': 'Yes'}) ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Robust': 'No'}) ++ test_printer(mutex_var, mutex_to_string, {'Robust': 'No'}) ++ ++ break_at(test_source, 'Set shared') ++ continue_cmd() # Go to test_setpshared ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Shared': 'Yes'}) ++ test_printer(mutex_var, mutex_to_string, {'Shared': 'Yes'}) ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Shared': 'No'}) ++ test_printer(mutex_var, mutex_to_string, {'Shared': 'No'}) ++ ++ break_at(test_source, 'Set protocol') ++ continue_cmd() # Go to test_setprotocol ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Protocol': 'Priority inherit'}) ++ test_printer(mutex_var, mutex_to_string, {'Protocol': 'Priority inherit'}) ++ next_cmd(2) ++ test_printer(attr_var, attr_to_string, {'Protocol': 'Priority protect'}) ++ test_printer(mutex_var, mutex_to_string, {'Protocol': 'Priority protect'}) ++ 
next_cmd(2) ++ test_printer(mutex_var, mutex_to_string, {'Priority ceiling': ++ str(PRIOCEILING)}) ++ next_cmd() ++ test_printer(attr_var, attr_to_string, {'Protocol': 'None'}) ++ test_printer(mutex_var, mutex_to_string, {'Protocol': 'None'}) ++ ++ continue_cmd() # Exit ++ ++except (NoLineError, pexpect.TIMEOUT) as exception: ++ print('Error: {0}'.format(exception)) ++ result = FAIL ++ ++except DebugError as exception: ++ print(exception) ++ result = UNSUPPORTED ++ ++else: ++ print('Test succeeded.') ++ result = PASS ++ ++exit(result) +diff --git a/nptl/test-rwlock-printers.c b/nptl/test-rwlock-printers.c +new file mode 100644 +index 0000000..dbbe9b8 +--- /dev/null ++++ b/nptl/test-rwlock-printers.c +@@ -0,0 +1,78 @@ ++/* Helper program for testing the pthread_rwlock_t pretty printer. ++ ++ Copyright (C) 2016 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++/* Keep the calls to the pthread_* functions on separate lines to make it easy ++ to advance through the program using the gdb 'next' command. */ ++ ++#include <pthread.h> ++ ++#define PASS 0 ++#define FAIL 1 ++ ++static int test_locking_reader (pthread_rwlock_t *rwlock); ++static int test_locking_writer (pthread_rwlock_t *rwlock); ++ ++int ++main (void) ++{ ++ pthread_rwlock_t rwlock; ++ ++ int result = FAIL; ++ ++ if (test_locking_reader (&rwlock) == PASS ++ && test_locking_writer (&rwlock) == PASS) ++ result = PASS; ++ /* Else, one of the pthread_rwlock* functions failed. */ ++ ++ return result; ++} ++ ++/* Tests locking the rwlock multiple times as a reader. */ ++static int ++test_locking_reader (pthread_rwlock_t *rwlock) ++{ ++ int result = FAIL; ++ ++ if (pthread_rwlock_init (rwlock, NULL) == 0 ++ && pthread_rwlock_rdlock (rwlock) == 0 /* Test locking (reader). */ ++ && pthread_rwlock_rdlock (rwlock) == 0 ++ && pthread_rwlock_rdlock (rwlock) == 0 ++ && pthread_rwlock_unlock (rwlock) == 0 ++ && pthread_rwlock_unlock (rwlock) == 0 ++ && pthread_rwlock_unlock (rwlock) == 0 ++ && pthread_rwlock_destroy (rwlock) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests locking the rwlock as a writer. */ ++static int ++test_locking_writer (pthread_rwlock_t *rwlock) ++{ ++ int result = FAIL; ++ ++ if (pthread_rwlock_init (rwlock, NULL) == 0 ++ && pthread_rwlock_wrlock (rwlock) == 0 /* Test locking (writer). */ ++ && pthread_rwlock_unlock (rwlock) == 0 ++ && pthread_rwlock_destroy (rwlock) == 0) ++ result = PASS; ++ ++ return result; ++} +diff --git a/nptl/test-rwlock-printers.py b/nptl/test-rwlock-printers.py +new file mode 100644 +index 0000000..b972fa6 +--- /dev/null ++++ b/nptl/test-rwlock-printers.py +@@ -0,0 +1,64 @@ ++# Common tests for the RWLockPrinter class. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. 
++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++import sys ++ ++from test_printers_common import * ++ ++test_source = sys.argv[1] ++test_bin = sys.argv[2] ++printer_files = sys.argv[3:] ++printer_names = ['global glibc-pthread-locks'] ++ ++try: ++ init_test(test_bin, printer_files, printer_names) ++ go_to_main() ++ ++ var = 'rwlock' ++ to_string = 'pthread_rwlock_t' ++ ++ break_at(test_source, 'Test locking (reader)') ++ continue_cmd() # Go to test_locking_reader ++ test_printer(var, to_string, {'Status': 'Unlocked'}) ++ next_cmd() ++ test_printer(var, to_string, {'Status': r'Locked \(Read\)', 'Readers': '1'}) ++ next_cmd() ++ test_printer(var, to_string, {'Readers': '2'}) ++ next_cmd() ++ test_printer(var, to_string, {'Readers': '3'}) ++ ++ break_at(test_source, 'Test locking (writer)') ++ continue_cmd() # Go to test_locking_writer ++ test_printer(var, to_string, {'Status': 'Unlocked'}) ++ next_cmd() ++ thread_id = get_current_thread_lwpid() ++ test_printer(var, to_string, {'Status': r'Locked \(Write\)', ++ 'Writer ID': thread_id}) ++ ++ continue_cmd() # Exit ++ ++except (NoLineError, pexpect.TIMEOUT) as exception: ++ print('Error: {0}'.format(exception)) ++ result = FAIL ++ ++else: ++ print('Test succeeded.') ++ result = PASS ++ ++exit(result) +diff --git a/nptl/test-rwlockattr-printers.c b/nptl/test-rwlockattr-printers.c +new file mode 100644 +index 0000000..d12facf +--- /dev/null ++++ b/nptl/test-rwlockattr-printers.c +@@ -0,0 +1,98 @@ ++/* Helper program for testing the pthread_rwlock_t and pthread_rwlockattr_t ++ pretty printers. ++ ++ Copyright (C) 2016 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++/* Keep the calls to the pthread_* functions on separate lines to make it easy ++ to advance through the program using the gdb 'next' command. */ ++ ++#include <pthread.h> ++ ++#define PASS 0 ++#define FAIL 1 ++ ++/* Need these so we don't have lines longer than 79 chars. 
*/ ++#define SET_KIND(attr, kind) pthread_rwlockattr_setkind_np (attr, kind) ++#define SET_SHARED(attr, shared) pthread_rwlockattr_setpshared (attr, shared) ++ ++static int rwlock_reinit (pthread_rwlock_t *rwlock, ++ const pthread_rwlockattr_t *attr); ++static int test_setkind_np (pthread_rwlock_t *rwlock, ++ pthread_rwlockattr_t *attr); ++static int test_setpshared (pthread_rwlock_t *rwlock, ++ pthread_rwlockattr_t *attr); ++ ++int ++main (void) ++{ ++ pthread_rwlock_t rwlock; ++ pthread_rwlockattr_t attr; ++ int result = FAIL; ++ ++ if (pthread_rwlockattr_init (&attr) == 0 ++ && pthread_rwlock_init (&rwlock, NULL) == 0 ++ && test_setkind_np (&rwlock, &attr) == PASS ++ && test_setpshared (&rwlock, &attr) == PASS) ++ result = PASS; ++ /* Else, one of the pthread_rwlock* functions failed. */ ++ ++ return result; ++} ++ ++/* Destroys RWLOCK and re-initializes it using ATTR. */ ++static int ++rwlock_reinit (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (pthread_rwlock_destroy (rwlock) == 0 ++ && pthread_rwlock_init (rwlock, attr) == 0) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting whether the rwlock prefers readers or writers. */ ++static int ++test_setkind_np (pthread_rwlock_t *rwlock, pthread_rwlockattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (SET_KIND (attr, PTHREAD_RWLOCK_PREFER_READER_NP) == 0 /* Set kind. */ ++ && rwlock_reinit (rwlock, attr) == PASS ++ && SET_KIND (attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP) == 0 ++ && rwlock_reinit (rwlock, attr) == PASS) ++ result = PASS; ++ ++ return result; ++} ++ ++/* Tests setting whether the rwlock can be shared between processes. */ ++static int ++test_setpshared (pthread_rwlock_t *rwlock, pthread_rwlockattr_t *attr) ++{ ++ int result = FAIL; ++ ++ if (SET_SHARED (attr, PTHREAD_PROCESS_SHARED) == 0 /* Set shared. */ ++ && rwlock_reinit (rwlock, attr) == PASS ++ && SET_SHARED (attr, PTHREAD_PROCESS_PRIVATE) == 0 ++ && rwlock_reinit (rwlock, attr) == PASS) ++ result = PASS; ++ ++ return result; ++} +diff --git a/nptl/test-rwlockattr-printers.py b/nptl/test-rwlockattr-printers.py +new file mode 100644 +index 0000000..1ca2dc6 +--- /dev/null ++++ b/nptl/test-rwlockattr-printers.py +@@ -0,0 +1,73 @@ ++# Common tests for the RWLockPrinter and RWLockAttributesPrinter classes. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. 
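The expected-children dictionaries used by these scripts (for example {'Status': r'Locked \(Read\)'}) are regular expressions because the harness matches each child against gdb's 'name = value' output, as test_printers_common.py does further below. A minimal sketch of that check, not part of the patch; the function name and the sample fragment are mine:

import re

def child_matches(printed_value, name, value_pattern):
    """True if the printed children contain a 'name = value' entry matching the pattern."""
    return re.search('{0} = {1}'.format(re.escape(name), value_pattern),
                     printed_value) is not None

# Illustrative fragment of a printed rwlock, not verbatim gdb output:
fragment = 'Status = Locked (Read), Readers = 1'
assert child_matches(fragment, 'Status', r'Locked \(Read\)')
assert child_matches(fragment, 'Readers', '1')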
++ ++import sys ++ ++from test_printers_common import * ++ ++test_source = sys.argv[1] ++test_bin = sys.argv[2] ++printer_files = sys.argv[3:] ++printer_names = ['global glibc-pthread-locks'] ++ ++try: ++ init_test(test_bin, printer_files, printer_names) ++ go_to_main() ++ ++ check_debug_symbol('struct pthread_rwlockattr') ++ ++ rwlock_var = 'rwlock' ++ rwlock_to_string = 'pthread_rwlock_t' ++ ++ attr_var = 'attr' ++ attr_to_string = 'pthread_rwlockattr_t' ++ ++ break_at(test_source, 'Set kind') ++ continue_cmd() # Go to test_setkind_np ++ next_cmd(2) ++ test_printer(rwlock_var, rwlock_to_string, {'Prefers': 'Readers'}) ++ test_printer(attr_var, attr_to_string, {'Prefers': 'Readers'}) ++ next_cmd(2) ++ test_printer(rwlock_var, rwlock_to_string, {'Prefers': 'Writers'}) ++ test_printer(attr_var, attr_to_string, {'Prefers': 'Writers'}) ++ ++ break_at(test_source, 'Set shared') ++ continue_cmd() # Go to test_setpshared ++ next_cmd(2) ++ test_printer(rwlock_var, rwlock_to_string, {'Shared': 'Yes'}) ++ test_printer(attr_var, attr_to_string, {'Shared': 'Yes'}) ++ next_cmd(2) ++ test_printer(rwlock_var, rwlock_to_string, {'Shared': 'No'}) ++ test_printer(attr_var, attr_to_string, {'Shared': 'No'}) ++ ++ continue_cmd() # Exit ++ ++except (NoLineError, pexpect.TIMEOUT) as exception: ++ print('Error: {0}'.format(exception)) ++ result = FAIL ++ ++except DebugError as exception: ++ print(exception) ++ result = UNSUPPORTED ++ ++else: ++ print('Test succeeded.') ++ result = PASS ++ ++exit(result) +diff --git a/scripts/gen-py-const.awk b/scripts/gen-py-const.awk +new file mode 100644 +index 0000000..4586f59 +--- /dev/null ++++ b/scripts/gen-py-const.awk +@@ -0,0 +1,118 @@ ++# Script to generate constants for Python pretty printers. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++# This script is a smaller version of the clever gen-asm-const.awk hack used to ++# generate ASM constants from .sym files. We'll use this to generate constants ++# for Python pretty printers. ++# ++# The input to this script are .pysym files that look like: ++# #C_Preprocessor_Directive... ++# NAME1 ++# NAME2 expression... ++# ++# A line giving just a name implies an expression consisting of just that name. ++# Comments start with '--'. ++# ++# The output of this script is a 'dummy' function containing 'asm' declarations ++# for each non-preprocessor line in the .pysym file. The expression values ++# will appear as input operands to the 'asm' declaration. 
For example, if we ++# have: ++# ++# /* header.h */ ++# #define MACRO 42 ++# ++# struct S { ++# char c1; ++# char c2; ++# char c3; ++# }; ++# ++# enum E { ++# ZERO, ++# ONE ++# }; ++# ++# /* symbols.pysym */ ++# #include <stddef.h> ++# #include "header.h" ++# -- This is a comment ++# MACRO ++# C3_OFFSET offsetof(struct S, c3) ++# E_ONE ONE ++# ++# the output will be: ++# ++# #include <stddef.h> ++# #include "header.h" ++# void dummy(void) ++# { ++# asm ("@name@MACRO@value@%0@" : : "i" (MACRO)); ++# asm ("@name@C3_OFFSET@value@%0@" : : "i" (offsetof(struct S, c3))); ++# asm ("@name@E_ONE@value@%0@" : : "i" (ONE)); ++# } ++# ++# We'll later feed this output to gcc -S. Since '-S' tells gcc to compile but ++# not assemble, gcc will output something like: ++# ++# dummy: ++# ... ++# @name@MACRO@value@$42@ ++# @name@C3_OFFSET@value@$2@ ++# @name@E_ONE@value@$1@ ++# ++# Finally, we can process that output to extract the constant values. ++# Notice gcc may prepend a special character such as '$' to each value. ++ ++# found_symbol indicates whether we found a non-comment, non-preprocessor line. ++BEGIN { found_symbol = 0 } ++ ++# C preprocessor directives go straight through. ++/^#/ { print; next; } ++ ++# Skip comments. ++/--/ { next; } ++ ++# Trim leading whitespace. ++{ sub(/^[[:blank:]]*/, ""); } ++ ++# If we found a non-comment, non-preprocessor line, print the 'dummy' function ++# header. ++NF > 0 && !found_symbol { ++ print "void dummy(void)\n{"; ++ found_symbol = 1; ++} ++ ++# If the line contains just a name, duplicate it so we can use that name ++# as the value of the expression. ++NF == 1 { sub(/^.*$/, "& &"); } ++ ++# If a line contains a name and an expression... ++NF > 1 { ++ name = $1; ++ ++ # Remove any characters before the second field. ++ sub(/^[^[:blank:]]+[[:blank:]]+/, ""); ++ ++ # '$0' ends up being everything that appeared after the first field ++ # separator. ++ printf " asm (\"@name@%s@value@%0@\" : : \"i\" (%s));\n", name, $0; ++} ++ ++# Close the 'dummy' function. ++END { if (found_symbol) print "}"; } +diff --git a/scripts/test_printers_common.py b/scripts/test_printers_common.py +new file mode 100644 +index 0000000..c79d7e3 +--- /dev/null ++++ b/scripts/test_printers_common.py +@@ -0,0 +1,364 @@ ++# Common functions and variables for testing the Python pretty printers. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. ++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++"""These tests require PExpect 4.0 or newer. ++ ++Exported constants: ++ PASS, FAIL, UNSUPPORTED (int): Test exit codes, as per evaluate-test.sh. 
++""" ++ ++import os ++import re ++from test_printers_exceptions import * ++ ++PASS = 0 ++FAIL = 1 ++UNSUPPORTED = 77 ++ ++gdb_bin = 'gdb' ++gdb_options = '-q -nx' ++gdb_invocation = '{0} {1}'.format(gdb_bin, gdb_options) ++pexpect_min_version = 4 ++gdb_min_version = (7, 8) ++encoding = 'utf-8' ++ ++try: ++ import pexpect ++except ImportError: ++ print('PExpect 4.0 or newer must be installed to test the pretty printers.') ++ exit(UNSUPPORTED) ++ ++pexpect_version = pexpect.__version__.split('.')[0] ++ ++if int(pexpect_version) < pexpect_min_version: ++ print('PExpect 4.0 or newer must be installed to test the pretty printers.') ++ exit(UNSUPPORTED) ++ ++if not pexpect.which(gdb_bin): ++ print('gdb 7.8 or newer must be installed to test the pretty printers.') ++ exit(UNSUPPORTED) ++ ++timeout = 5 ++TIMEOUTFACTOR = os.environ.get('TIMEOUTFACTOR') ++ ++if TIMEOUTFACTOR: ++ timeout = int(TIMEOUTFACTOR) ++ ++try: ++ # Check the gdb version. ++ version_cmd = '{0} --version'.format(gdb_invocation, timeout=timeout) ++ gdb_version_out = pexpect.run(version_cmd, encoding=encoding) ++ ++ # The gdb version string is "GNU gdb <PKGVERSION><version>", where ++ # PKGVERSION can be any text. We assume that there'll always be a space ++ # between PKGVERSION and the version number for the sake of the regexp. ++ version_match = re.search(r'GNU gdb .* ([1-9]+)\.([0-9]+)', gdb_version_out) ++ ++ if not version_match: ++ print('The gdb version string (gdb -v) is incorrectly formatted.') ++ exit(UNSUPPORTED) ++ ++ gdb_version = (int(version_match.group(1)), int(version_match.group(2))) ++ ++ if gdb_version < gdb_min_version: ++ print('gdb 7.8 or newer must be installed to test the pretty printers.') ++ exit(UNSUPPORTED) ++ ++ # Check if gdb supports Python. ++ gdb_python_cmd = '{0} -ex "python import os" -batch'.format(gdb_invocation, ++ timeout=timeout) ++ gdb_python_error = pexpect.run(gdb_python_cmd, encoding=encoding) ++ ++ if gdb_python_error: ++ print('gdb must have python support to test the pretty printers.') ++ exit(UNSUPPORTED) ++ ++ # If everything's ok, spawn the gdb process we'll use for testing. ++ gdb = pexpect.spawn(gdb_invocation, echo=False, timeout=timeout, ++ encoding=encoding) ++ gdb_prompt = u'\(gdb\)' ++ gdb.expect(gdb_prompt) ++ ++except pexpect.ExceptionPexpect as exception: ++ print('Error: {0}'.format(exception)) ++ exit(FAIL) ++ ++def test(command, pattern=None): ++ """Sends 'command' to gdb and expects the given 'pattern'. ++ ++ If 'pattern' is None, simply consumes everything up to and including ++ the gdb prompt. ++ ++ Args: ++ command (string): The command we'll send to gdb. ++ pattern (raw string): A pattern the gdb output should match. ++ ++ Returns: ++ string: The string that matched 'pattern', or an empty string if ++ 'pattern' was None. ++ """ ++ ++ match = '' ++ ++ gdb.sendline(command) ++ ++ if pattern: ++ # PExpect does a non-greedy match for '+' and '*'. Since it can't look ++ # ahead on the gdb output stream, if 'pattern' ends with a '+' or a '*' ++ # we may end up matching only part of the required output. ++ # To avoid this, we'll consume 'pattern' and anything that follows it ++ # up to and including the gdb prompt, then extract 'pattern' later. ++ index = gdb.expect([u'{0}.+{1}'.format(pattern, gdb_prompt), ++ pexpect.TIMEOUT]) ++ ++ if index == 0: ++ # gdb.after now contains the whole match. Extract the text that ++ # matches 'pattern'. ++ match = re.match(pattern, gdb.after, re.DOTALL).group() ++ elif index == 1: ++ # We got a timeout exception. 
Print information on what caused it ++ # and bail out. ++ error = ('Response does not match the expected pattern.\n' ++ 'Command: {0}\n' ++ 'Expected pattern: {1}\n' ++ 'Response: {2}'.format(command, pattern, gdb.before)) ++ ++ raise pexpect.TIMEOUT(error) ++ else: ++ # Consume just the the gdb prompt. ++ gdb.expect(gdb_prompt) ++ ++ return match ++ ++def init_test(test_bin, printer_files, printer_names): ++ """Loads the test binary file and the required pretty printers to gdb. ++ ++ Args: ++ test_bin (string): The name of the test binary file. ++ pretty_printers (list of strings): A list with the names of the pretty ++ printer files. ++ """ ++ ++ # Load all the pretty printer files. We're assuming these are safe. ++ for printer_file in printer_files: ++ test('source {0}'.format(printer_file)) ++ ++ # Disable all the pretty printers. ++ test('disable pretty-printer', r'0 of [0-9]+ printers enabled') ++ ++ # Enable only the required printers. ++ for printer in printer_names: ++ test('enable pretty-printer {0}'.format(printer), ++ r'[1-9][0-9]* of [1-9]+ printers enabled') ++ ++ # Finally, load the test binary. ++ test('file {0}'.format(test_bin)) ++ ++def go_to_main(): ++ """Executes a gdb 'start' command, which takes us to main.""" ++ ++ test('start', r'main') ++ ++def get_line_number(file_name, string): ++ """Returns the number of the line in which 'string' appears within a file. ++ ++ Args: ++ file_name (string): The name of the file we'll search through. ++ string (string): The string we'll look for. ++ ++ Returns: ++ int: The number of the line in which 'string' appears, starting from 1. ++ """ ++ number = -1 ++ ++ with open(file_name) as src_file: ++ for i, line in enumerate(src_file): ++ if string in line: ++ number = i + 1 ++ break ++ ++ if number == -1: ++ raise NoLineError(file_name, string) ++ ++ return number ++ ++def break_at(file_name, string, temporary=True, thread=None): ++ """Places a breakpoint on the first line in 'file_name' containing 'string'. ++ ++ 'string' is usually a comment like "Stop here". Notice this may fail unless ++ the comment is placed inline next to actual code, e.g.: ++ ++ ... ++ /* Stop here */ ++ ... ++ ++ may fail, while: ++ ++ ... ++ some_func(); /* Stop here */ ++ ... ++ ++ will succeed. ++ ++ If 'thread' isn't None, the breakpoint will be set for all the threads. ++ Otherwise, it'll be set only for 'thread'. ++ ++ Args: ++ file_name (string): The name of the file we'll place the breakpoint in. ++ string (string): A string we'll look for inside the file. ++ We'll place a breakpoint on the line which contains it. ++ temporary (bool): Whether the breakpoint should be automatically deleted ++ after we reach it. ++ thread (int): The number of the thread we'll place the breakpoint for, ++ as seen by gdb. If specified, it should be greater than zero. ++ """ ++ ++ if not thread: ++ thread_str = '' ++ else: ++ thread_str = 'thread {0}'.format(thread) ++ ++ if temporary: ++ command = 'tbreak' ++ break_type = 'Temporary breakpoint' ++ else: ++ command = 'break' ++ break_type = 'Breakpoint' ++ ++ line_number = str(get_line_number(file_name, string)) ++ ++ test('{0} {1}:{2} {3}'.format(command, file_name, line_number, thread_str), ++ r'{0} [0-9]+ at 0x[a-f0-9]+: file {1}, line {2}\.'.format(break_type, ++ file_name, ++ line_number)) ++ ++def continue_cmd(thread=None): ++ """Executes a gdb 'continue' command. ++ ++ If 'thread' isn't None, the command will be applied to all the threads. ++ Otherwise, it'll be applied only to 'thread'. 
++ ++ Args: ++ thread (int): The number of the thread we'll apply the command to, ++ as seen by gdb. If specified, it should be greater than zero. ++ """ ++ ++ if not thread: ++ command = 'continue' ++ else: ++ command = 'thread apply {0} continue'.format(thread) ++ ++ test(command) ++ ++def next_cmd(count=1, thread=None): ++ """Executes a gdb 'next' command. ++ ++ If 'thread' isn't None, the command will be applied to all the threads. ++ Otherwise, it'll be applied only to 'thread'. ++ ++ Args: ++ count (int): The 'count' argument of the 'next' command. ++ thread (int): The number of the thread we'll apply the command to, ++ as seen by gdb. If specified, it should be greater than zero. ++ """ ++ ++ if not thread: ++ command = 'next' ++ else: ++ command = 'thread apply {0} next' ++ ++ test('{0} {1}'.format(command, count)) ++ ++def select_thread(thread): ++ """Selects the thread indicated by 'thread'. ++ ++ Args: ++ thread (int): The number of the thread we'll switch to, as seen by gdb. ++ This should be greater than zero. ++ """ ++ ++ if thread > 0: ++ test('thread {0}'.format(thread)) ++ ++def get_current_thread_lwpid(): ++ """Gets the current thread's Lightweight Process ID. ++ ++ Returns: ++ string: The current thread's LWP ID. ++ """ ++ ++ # It's easier to get the LWP ID through the Python API than the gdb CLI. ++ command = 'python print(gdb.selected_thread().ptid[1])' ++ ++ return test(command, r'[0-9]+') ++ ++def set_scheduler_locking(mode): ++ """Executes the gdb 'set scheduler-locking' command. ++ ++ Args: ++ mode (bool): Whether the scheduler locking mode should be 'on'. ++ """ ++ modes = { ++ True: 'on', ++ False: 'off' ++ } ++ ++ test('set scheduler-locking {0}'.format(modes[mode])) ++ ++def test_printer(var, to_string, children=None, is_ptr=True): ++ """ Tests the output of a pretty printer. ++ ++ For a variable called 'var', this tests whether its associated printer ++ outputs the expected 'to_string' and children (if any). ++ ++ Args: ++ var (string): The name of the variable we'll print. ++ to_string (raw string): The expected output of the printer's 'to_string' ++ method. ++ children (map {raw string->raw string}): A map with the expected output ++ of the printer's children' method. ++ is_ptr (bool): Whether 'var' is a pointer, and thus should be ++ dereferenced. ++ """ ++ ++ if is_ptr: ++ var = '*{0}'.format(var) ++ ++ test('print {0}'.format(var), to_string) ++ ++ if children: ++ for name, value in children.items(): ++ # Children are shown as 'name = value'. ++ test('print {0}'.format(var), r'{0} = {1}'.format(name, value)) ++ ++def check_debug_symbol(symbol): ++ """ Tests whether a given debugging symbol exists. ++ ++ If the symbol doesn't exist, raises a DebugError. ++ ++ Args: ++ symbol (string): The symbol we're going to check for. ++ """ ++ ++ try: ++ test('ptype {0}'.format(symbol), r'type = {0}'.format(symbol)) ++ ++ except pexpect.TIMEOUT: ++ # The symbol doesn't exist. ++ raise DebugError(symbol) +diff --git a/scripts/test_printers_exceptions.py b/scripts/test_printers_exceptions.py +new file mode 100644 +index 0000000..17034b5 +--- /dev/null ++++ b/scripts/test_printers_exceptions.py +@@ -0,0 +1,61 @@ ++# Exception classes used when testing the Python pretty printers. ++# ++# Copyright (C) 2016 Free Software Foundation, Inc. ++# This file is part of the GNU C Library. 
++# ++# The GNU C Library is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public ++# License as published by the Free Software Foundation; either ++# version 2.1 of the License, or (at your option) any later version. ++# ++# The GNU C Library is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with the GNU C Library; if not, see ++# <http://www.gnu.org/licenses/>. ++ ++class NoLineError(Exception): ++ """Custom exception to indicate that a test file doesn't contain ++ the requested string. ++ """ ++ ++ def __init__(self, file_name, string): ++ """Constructor. ++ ++ Args: ++ file_name (string): The name of the test file. ++ string (string): The string that was requested. ++ """ ++ ++ super(NoLineError, self).__init__() ++ self.file_name = file_name ++ self.string = string ++ ++ def __str__(self): ++ """Shows a readable representation of the exception.""" ++ ++ return ('File {0} has no line containing the following string: {1}' ++ .format(self.file_name, self.string)) ++ ++class DebugError(Exception): ++ """Custom exception to indicate that a required debugging symbol is missing. ++ """ ++ ++ def __init__(self, symbol): ++ """Constructor. ++ ++ Args: ++ symbol (string): The name of the entity whose debug info is missing. ++ """ ++ ++ super(DebugError, self).__init__() ++ self.symbol = symbol ++ ++ def __str__(self): ++ """Shows a readable representation of the exception.""" ++ ++ return ('The required debugging information for {0} is missing.' ++ .format(self.symbol)) +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0004-New-condvar-implementation-that-provides-stronger-or.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0004-New-condvar-implementation-that-provides-stronger-or.patch new file mode 100644 index 000000000..3c7bfa160 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0004-New-condvar-implementation-that-provides-stronger-or.patch @@ -0,0 +1,7171 @@ +From 27af8689a6ba8d182f3cbe6ba42cc654ceed0351 Mon Sep 17 00:00:00 2001 +From: Catalin Enache <catalin.enache@windriver.com> +Date: Fri, 30 Jun 2017 11:56:41 +0300 +Subject: [PATCH 4/6] New condvar implementation that provides stronger + ordering guarantees. + +This is a new implementation for condition variables, required +after http://austingroupbugs.net/view.php?id=609 to fix bug 13165. In +essence, we need to be stricter in which waiters a signal or broadcast +is required to wake up; this couldn't be solved using the old algorithm. +ISO C++ made a similar clarification, so this also fixes a bug in +current libstdc++, for example. + +We can't use the old algorithm anymore because futexes do not guarantee +to wake in FIFO order. Thus, when we wake, we can't simply let any +waiter grab a signal, but we need to ensure that one of the waiters +happening before the signal is woken up. This is something the previous +algorithm violated (see bug 13165). + +There's another issue specific to condvars: ABA issues on the underlying +futexes. Unlike mutexes that have just three states, or semaphores that +have no tokens or a limited number of them, the state of a condvar is +the *order* of the waiters. 
A waiter on a semaphore can grab a token +whenever one is available; a condvar waiter must only consume a signal +if it is eligible to do so as determined by the relative order of the +waiter and the signal. +Therefore, this new algorithm maintains two groups of waiters: Those +eligible to consume signals (G1), and those that have to wait until +previous waiters have consumed signals (G2). Once G1 is empty, G2 +becomes the new G1. 64b counters are used to avoid ABA issues. + +This condvar doesn't yet use a requeue optimization (ie, on a broadcast, +waking just one thread and requeueing all others on the futex of the +mutex supplied by the program). I don't think doing the requeue is +necessarily the right approach (but I haven't done real measurements +yet): +* If a program expects to wake many threads at the same time and make +that scalable, a condvar isn't great anyway because of how it requires +waiters to operate mutually exclusive (due to the mutex usage). Thus, a +thundering herd problem is a scalability problem with or without the +optimization. Using something like a semaphore might be more +appropriate in such a case. +* The scalability problem is actually at the mutex side; the condvar +could help (and it tries to with the requeue optimization), but it +should be the mutex who decides how that is done, and whether it is done +at all. +* Forcing all but one waiter into the kernel-side wait queue of the +mutex prevents/avoids the use of lock elision on the mutex. Thus, it +prevents the only cure against the underlying scalability problem +inherent to condvars. +* If condvars use short critical sections (ie, hold the mutex just to +check a binary flag or such), which they should do ideally, then forcing +all those waiter to proceed serially with kernel-based hand-off (ie, +futex ops in the mutex' contended state, via the futex wait queues) will +be less efficient than just letting a scalable mutex implementation take +care of it. Our current mutex impl doesn't employ spinning at all, but +if critical sections are short, spinning can be much better. +* Doing the requeue stuff requires all waiters to always drive the mutex +into the contended state. This leads to each waiter having to call +futex_wake after lock release, even if this wouldn't be necessary. + + [BZ #13165] + * nptl/pthread_cond_broadcast.c (__pthread_cond_broadcast): Rewrite to + use new algorithm. + * nptl/pthread_cond_destroy.c (__pthread_cond_destroy): Likewise. + * nptl/pthread_cond_init.c (__pthread_cond_init): Likewise. + * nptl/pthread_cond_signal.c (__pthread_cond_signal): Likewise. + * nptl/pthread_cond_wait.c (__pthread_cond_wait): Likewise. + (__pthread_cond_timedwait): Move here from pthread_cond_timedwait.c. + (__condvar_confirm_wakeup, __condvar_cancel_waiting, + __condvar_cleanup_waiting, __condvar_dec_grefs, + __pthread_cond_wait_common): New. + (__condvar_cleanup): Remove. + * npt/pthread_condattr_getclock.c (pthread_condattr_getclock): Adapt. + * npt/pthread_condattr_setclock.c (pthread_condattr_setclock): + Likewise. + * npt/pthread_condattr_getpshared.c (pthread_condattr_getpshared): + Likewise. + * npt/pthread_condattr_init.c (pthread_condattr_init): Likewise. + * nptl/tst-cond1.c: Add comment. + * nptl/tst-cond20.c (do_test): Adapt. + * nptl/tst-cond22.c (do_test): Likewise. + * sysdeps/aarch64/nptl/bits/pthreadtypes.h (pthread_cond_t): Adapt + structure. + * sysdeps/arm/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/ia64/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. 
+ * sysdeps/m68k/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/microblaze/nptl/bits/pthreadtypes.h (pthread_cond_t): + Likewise. + * sysdeps/mips/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/nios2/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/s390/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/sh/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/tile/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h (pthread_cond_t): + Likewise. + * sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h (pthread_cond_t): + Likewise. + * sysdeps/x86/bits/pthreadtypes.h (pthread_cond_t): Likewise. + * sysdeps/nptl/internaltypes.h (COND_NWAITERS_SHIFT): Remove. + (COND_CLOCK_BITS): Adapt. + * sysdeps/nptl/pthread.h (PTHREAD_COND_INITIALIZER): Adapt. + * nptl/pthreadP.h (__PTHREAD_COND_CLOCK_MONOTONIC_MASK, + __PTHREAD_COND_SHARED_MASK): New. + * nptl/nptl-printers.py (CLOCK_IDS): Remove. + (ConditionVariablePrinter, ConditionVariableAttributesPrinter): Adapt. + * nptl/nptl_lock_constants.pysym: Adapt. + * nptl/test-cond-printers.py: Adapt. + * sysdeps/unix/sysv/linux/hppa/internaltypes.h (cond_compat_clear, + cond_compat_check_and_clear): Adapt. + * sysdeps/unix/sysv/linux/hppa/pthread_cond_timedwait.c: Remove file ... + * sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c + (__pthread_cond_timedwait): ... and move here. + * nptl/DESIGN-condvar.txt: Remove file. + * nptl/lowlevelcond.sym: Likewise. + * nptl/pthread_cond_timedwait.c: Likewise. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S: Likewise. + * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S: Likewise. + * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Likewise. + * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise. + * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise. + * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise. 
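+
+To make the group bookkeeping described above concrete, the following is a
+minimal, single-threaded C sketch of the eligibility rule that the G1/G2
+scheme enforces. It is not the glibc implementation: it omits all atomics,
+futex words, cancellation handling and the packed bit fields, and the toy_*
+names are invented purely for illustration. The idea it models is: waiters
+take a ticket from a waiter sequence, only tickets inside the currently open
+group G1 may consume a signal, and once G1 is drained the waiters that
+arrived later (G2) become the new G1.
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* Toy model of the two-group rule, kept as plain counters.  */
+  struct toy_condvar {
+    uint64_t wseq;      /* next waiter sequence number to hand out      */
+    uint64_t g1_start;  /* first sequence number belonging to G1        */
+    uint64_t g1_size;   /* waiters in G1 still eligible for a signal    */
+  };
+
+  /* A waiter registers itself and remembers its position.  */
+  static uint64_t toy_wait_begin (struct toy_condvar *cv)
+  {
+    return cv->wseq++;
+  }
+
+  /* A signal may only be consumed by a waiter whose position lies inside
+     the currently open G1; later waiters (G2) must not steal it.  */
+  static bool toy_may_consume (const struct toy_condvar *cv, uint64_t pos)
+  {
+    return pos >= cv->g1_start && pos < cv->g1_start + cv->g1_size;
+  }
+
+  /* Once G1 is drained, everything that arrived since becomes the new G1.  */
+  static void toy_switch_groups (struct toy_condvar *cv)
+  {
+    cv->g1_start += cv->g1_size;
+    cv->g1_size = cv->wseq - cv->g1_start;
+  }
+
+  int main (void)
+  {
+    struct toy_condvar cv = { .wseq = 0, .g1_start = 0, .g1_size = 0 };
+    uint64_t a = toy_wait_begin (&cv);   /* waiter A arrives             */
+    toy_switch_groups (&cv);             /* A's group becomes G1         */
+    uint64_t b = toy_wait_begin (&cv);   /* waiter B arrives later -> G2 */
+    printf ("A eligible: %d, B eligible: %d\n",
+            (int) toy_may_consume (&cv, a), (int) toy_may_consume (&cv, b));
+    return 0;
+  }
+
+In this sketch the earlier waiter A is eligible for a pending signal while
+the later waiter B is not until the groups switch again. Tracking an explicit
+group boundary instead of the old wakeup_seq/woken_seq counters is what
+prevents a late waiter from consuming a wakeup owed to an earlier one, which
+is the ordering violation behind bug 13165.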
+ +Upstream-Status: Backport + +Author: Torvald Riegel <triegel@redhat.com> +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + ChangeLog | 74 ++ + nptl/DESIGN-condvar.txt | 134 --- + nptl/Makefile | 6 +- + nptl/lowlevelcond.sym | 16 - + nptl/nptl-printers.py | 70 +- + nptl/nptl_lock_constants.pysym | 27 +- + nptl/pthreadP.h | 7 + + nptl/pthread_cond_broadcast.c | 99 ++- + nptl/pthread_cond_common.c | 466 ++++++++++ + nptl/pthread_cond_destroy.c | 82 +- + nptl/pthread_cond_init.c | 28 +- + nptl/pthread_cond_signal.c | 99 ++- + nptl/pthread_cond_timedwait.c | 268 ------ + nptl/pthread_cond_wait.c | 754 ++++++++++++---- + nptl/pthread_condattr_getclock.c | 2 +- + nptl/pthread_condattr_getpshared.c | 3 +- + nptl/pthread_condattr_init.c | 4 +- + nptl/pthread_condattr_setclock.c | 11 +- + nptl/test-cond-printers.py | 2 +- + nptl/tst-cond1.c | 3 + + nptl/tst-cond20.c | 5 +- + nptl/tst-cond22.c | 18 +- + sysdeps/aarch64/nptl/bits/pthreadtypes.h | 31 +- + sysdeps/arm/nptl/bits/pthreadtypes.h | 29 +- + sysdeps/ia64/nptl/bits/pthreadtypes.h | 31 +- + sysdeps/m68k/nptl/bits/pthreadtypes.h | 32 +- + sysdeps/microblaze/nptl/bits/pthreadtypes.h | 29 +- + sysdeps/mips/nptl/bits/pthreadtypes.h | 31 +- + sysdeps/nios2/nptl/bits/pthreadtypes.h | 31 +- + sysdeps/nptl/internaltypes.h | 17 +- + sysdeps/nptl/pthread.h | 2 +- + sysdeps/s390/nptl/bits/pthreadtypes.h | 29 +- + sysdeps/sh/nptl/bits/pthreadtypes.h | 29 +- + sysdeps/tile/nptl/bits/pthreadtypes.h | 29 +- + sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h | 31 +- + sysdeps/unix/sysv/linux/hppa/internaltypes.h | 40 +- + .../unix/sysv/linux/hppa/pthread_cond_timedwait.c | 41 - + sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c | 13 + + .../sysv/linux/i386/i686/pthread_cond_timedwait.S | 20 - + .../unix/sysv/linux/i386/pthread_cond_broadcast.S | 241 ----- + sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S | 216 ----- + .../unix/sysv/linux/i386/pthread_cond_timedwait.S | 974 --------------------- + sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S | 642 -------------- + .../unix/sysv/linux/powerpc/bits/pthreadtypes.h | 31 +- + .../sysv/linux/x86_64/pthread_cond_broadcast.S | 177 ---- + .../unix/sysv/linux/x86_64/pthread_cond_signal.S | 161 ---- + .../sysv/linux/x86_64/pthread_cond_timedwait.S | 623 ------------- + sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S | 555 ------------ + sysdeps/x86/bits/pthreadtypes.h | 29 +- + 49 files changed, 1671 insertions(+), 4621 deletions(-) + delete mode 100644 nptl/DESIGN-condvar.txt + delete mode 100644 nptl/lowlevelcond.sym + create mode 100644 nptl/pthread_cond_common.c + delete mode 100644 nptl/pthread_cond_timedwait.c + delete mode 100644 sysdeps/unix/sysv/linux/hppa/pthread_cond_timedwait.c + delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S + delete mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S + delete mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S + delete mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S + delete mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S + delete mode 100644 sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S + delete mode 100644 sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S + delete mode 100644 sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S + delete mode 100644 sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S + +diff --git a/ChangeLog b/ChangeLog +index 8036c1e..c94db7b 100644 +--- a/ChangeLog ++++ b/ChangeLog +@@ -1,3 +1,77 @@ ++2016-12-31 Torvald 
Riegel <triegel@redhat.com> ++ ++ [BZ #13165] ++ * nptl/pthread_cond_broadcast.c (__pthread_cond_broadcast): Rewrite to ++ use new algorithm. ++ * nptl/pthread_cond_destroy.c (__pthread_cond_destroy): Likewise. ++ * nptl/pthread_cond_init.c (__pthread_cond_init): Likewise. ++ * nptl/pthread_cond_signal.c (__pthread_cond_signal): Likewise. ++ * nptl/pthread_cond_wait.c (__pthread_cond_wait): Likewise. ++ (__pthread_cond_timedwait): Move here from pthread_cond_timedwait.c. ++ (__condvar_confirm_wakeup, __condvar_cancel_waiting, ++ __condvar_cleanup_waiting, __condvar_dec_grefs, ++ __pthread_cond_wait_common): New. ++ (__condvar_cleanup): Remove. ++ * npt/pthread_condattr_getclock.c (pthread_condattr_getclock): Adapt. ++ * npt/pthread_condattr_setclock.c (pthread_condattr_setclock): ++ Likewise. ++ * npt/pthread_condattr_getpshared.c (pthread_condattr_getpshared): ++ Likewise. ++ * npt/pthread_condattr_init.c (pthread_condattr_init): Likewise. ++ * nptl/tst-cond1.c: Add comment. ++ * nptl/tst-cond20.c (do_test): Adapt. ++ * nptl/tst-cond22.c (do_test): Likewise. ++ * sysdeps/aarch64/nptl/bits/pthreadtypes.h (pthread_cond_t): Adapt ++ structure. ++ * sysdeps/arm/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/ia64/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/m68k/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/microblaze/nptl/bits/pthreadtypes.h (pthread_cond_t): ++ Likewise. ++ * sysdeps/mips/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/nios2/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/s390/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/sh/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/tile/nptl/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h (pthread_cond_t): ++ Likewise. ++ * sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h (pthread_cond_t): ++ Likewise. ++ * sysdeps/x86/bits/pthreadtypes.h (pthread_cond_t): Likewise. ++ * sysdeps/nptl/internaltypes.h (COND_NWAITERS_SHIFT): Remove. ++ (COND_CLOCK_BITS): Adapt. ++ * sysdeps/nptl/pthread.h (PTHREAD_COND_INITIALIZER): Adapt. ++ * nptl/pthreadP.h (__PTHREAD_COND_CLOCK_MONOTONIC_MASK, ++ __PTHREAD_COND_SHARED_MASK): New. ++ * nptl/nptl-printers.py (CLOCK_IDS): Remove. ++ (ConditionVariablePrinter, ConditionVariableAttributesPrinter): Adapt. ++ * nptl/nptl_lock_constants.pysym: Adapt. ++ * nptl/test-cond-printers.py: Adapt. ++ * sysdeps/unix/sysv/linux/hppa/internaltypes.h (cond_compat_clear, ++ cond_compat_check_and_clear): Adapt. ++ * sysdeps/unix/sysv/linux/hppa/pthread_cond_timedwait.c: Remove file ... ++ * sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c ++ (__pthread_cond_timedwait): ... and move here. ++ * nptl/DESIGN-condvar.txt: Remove file. ++ * nptl/lowlevelcond.sym: Likewise. ++ * nptl/pthread_cond_timedwait.c: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S: Likewise. 
++ * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S: Likewise. ++ * sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S: Likewise. ++ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: Likewise. ++ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: Likewise. ++ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S: Likewise. ++ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise. ++ + 2016-12-08 Martin Galvan <martin.galvan@tallertechnologies.com> + + * INSTALL: Regenerated. +diff --git a/nptl/DESIGN-condvar.txt b/nptl/DESIGN-condvar.txt +deleted file mode 100644 +index 4845251..0000000 +--- a/nptl/DESIGN-condvar.txt ++++ /dev/null +@@ -1,134 +0,0 @@ +-Conditional Variable pseudocode. +-================================ +- +- int pthread_cond_timedwait (pthread_cond_t *cv, pthread_mutex_t *mutex); +- int pthread_cond_signal (pthread_cond_t *cv); +- int pthread_cond_broadcast (pthread_cond_t *cv); +- +-struct pthread_cond_t { +- +- unsigned int cond_lock; +- +- internal mutex +- +- uint64_t total_seq; +- +- Total number of threads using the conditional variable. +- +- uint64_t wakeup_seq; +- +- sequence number for next wakeup. +- +- uint64_t woken_seq; +- +- sequence number of last woken thread. +- +- uint32_t broadcast_seq; +- +-} +- +- +-struct cv_data { +- +- pthread_cond_t *cv; +- +- uint32_t bc_seq +- +-} +- +- +- +-cleanup_handler(cv_data) +-{ +- cv = cv_data->cv; +- lll_lock(cv->lock); +- +- if (cv_data->bc_seq == cv->broadcast_seq) { +- ++cv->wakeup_seq; +- ++cv->woken_seq; +- } +- +- /* make sure no signal gets lost. */ +- FUTEX_WAKE(cv->wakeup_seq, ALL); +- +- lll_unlock(cv->lock); +-} +- +- +-cond_timedwait(cv, mutex, timeout): +-{ +- lll_lock(cv->lock); +- mutex_unlock(mutex); +- +- cleanup_push +- +- ++cv->total_seq; +- val = seq = cv->wakeup_seq; +- cv_data.bc = cv->broadcast_seq; +- cv_data.cv = cv; +- +- while (1) { +- +- lll_unlock(cv->lock); +- +- enable_async(&cv_data); +- +- ret = FUTEX_WAIT(cv->wakeup_seq, val, timeout); +- +- restore_async +- +- lll_lock(cv->lock); +- +- if (bc != cv->broadcast_seq) +- goto bc_out; +- +- val = cv->wakeup_seq; +- +- if (val != seq && cv->woken_seq != val) { +- ret = 0; +- break; +- } +- +- if (ret == TIMEDOUT) { +- ++cv->wakeup_seq; +- break; +- } +- } +- +- ++cv->woken_seq; +- +- bc_out: +- lll_unlock(cv->lock); +- +- cleanup_pop +- +- mutex_lock(mutex); +- +- return ret; +-} +- +-cond_signal(cv) +-{ +- lll_lock(cv->lock); +- +- if (cv->total_seq > cv->wakeup_seq) { +- ++cv->wakeup_seq; +- FUTEX_WAKE(cv->wakeup_seq, 1); +- } +- +- lll_unlock(cv->lock); +-} +- +-cond_broadcast(cv) +-{ +- lll_lock(cv->lock); +- +- if (cv->total_seq > cv->wakeup_seq) { +- cv->wakeup_seq = cv->total_seq; +- cv->woken_seq = cv->total_seq; +- ++cv->broadcast_seq; +- FUTEX_WAKE(cv->wakeup_seq, ALL); +- } +- +- lll_unlock(cv->lock); +-} +diff --git a/nptl/Makefile b/nptl/Makefile +index 49f6ba6..1f0674c 100644 +--- a/nptl/Makefile ++++ b/nptl/Makefile +@@ -71,7 +71,7 @@ libpthread-routines = nptl-init vars events version pt-interp \ + pthread_rwlockattr_getkind_np \ + pthread_rwlockattr_setkind_np \ + pthread_cond_init pthread_cond_destroy \ +- pthread_cond_wait pthread_cond_timedwait \ ++ pthread_cond_wait \ + pthread_cond_signal pthread_cond_broadcast \ + old_pthread_cond_init old_pthread_cond_destroy \ + old_pthread_cond_wait old_pthread_cond_timedwait \ +@@ -181,7 +181,6 
@@ CFLAGS-pthread_timedjoin.c = -fexceptions -fasynchronous-unwind-tables + CFLAGS-pthread_once.c = $(uses-callbacks) -fexceptions \ + -fasynchronous-unwind-tables + CFLAGS-pthread_cond_wait.c = -fexceptions -fasynchronous-unwind-tables +-CFLAGS-pthread_cond_timedwait.c = -fexceptions -fasynchronous-unwind-tables + CFLAGS-sem_wait.c = -fexceptions -fasynchronous-unwind-tables + CFLAGS-sem_timedwait.c = -fexceptions -fasynchronous-unwind-tables + +@@ -303,8 +302,7 @@ test-xfail-tst-once5 = yes + # Files which must not be linked with libpthread. + tests-nolibpthread = tst-unload + +-gen-as-const-headers = pthread-errnos.sym \ +- lowlevelcond.sym lowlevelrwlock.sym \ ++gen-as-const-headers = pthread-errnos.sym lowlevelrwlock.sym \ + unwindbuf.sym \ + lowlevelrobustlock.sym pthread-pi-defines.sym + +diff --git a/nptl/lowlevelcond.sym b/nptl/lowlevelcond.sym +deleted file mode 100644 +index 18e1ada..0000000 +--- a/nptl/lowlevelcond.sym ++++ /dev/null +@@ -1,16 +0,0 @@ +-#include <stddef.h> +-#include <sched.h> +-#include <bits/pthreadtypes.h> +-#include <internaltypes.h> +- +--- +- +-cond_lock offsetof (pthread_cond_t, __data.__lock) +-cond_futex offsetof (pthread_cond_t, __data.__futex) +-cond_nwaiters offsetof (pthread_cond_t, __data.__nwaiters) +-total_seq offsetof (pthread_cond_t, __data.__total_seq) +-wakeup_seq offsetof (pthread_cond_t, __data.__wakeup_seq) +-woken_seq offsetof (pthread_cond_t, __data.__woken_seq) +-dep_mutex offsetof (pthread_cond_t, __data.__mutex) +-broadcast_seq offsetof (pthread_cond_t, __data.__broadcast_seq) +-nwaiters_shift COND_NWAITERS_SHIFT +diff --git a/nptl/nptl-printers.py b/nptl/nptl-printers.py +index e402f23..76adadd 100644 +--- a/nptl/nptl-printers.py ++++ b/nptl/nptl-printers.py +@@ -293,16 +293,6 @@ class MutexAttributesPrinter(object): + elif protocol == PTHREAD_PRIO_PROTECT: + self.values.append(('Protocol', 'Priority protect')) + +-CLOCK_IDS = { +- CLOCK_REALTIME: 'CLOCK_REALTIME', +- CLOCK_MONOTONIC: 'CLOCK_MONOTONIC', +- CLOCK_PROCESS_CPUTIME_ID: 'CLOCK_PROCESS_CPUTIME_ID', +- CLOCK_THREAD_CPUTIME_ID: 'CLOCK_THREAD_CPUTIME_ID', +- CLOCK_MONOTONIC_RAW: 'CLOCK_MONOTONIC_RAW', +- CLOCK_REALTIME_COARSE: 'CLOCK_REALTIME_COARSE', +- CLOCK_MONOTONIC_COARSE: 'CLOCK_MONOTONIC_COARSE' +-} +- + class ConditionVariablePrinter(object): + """Pretty printer for pthread_cond_t.""" + +@@ -313,24 +303,8 @@ class ConditionVariablePrinter(object): + cond: A gdb.value representing a pthread_cond_t. + """ + +- # Since PTHREAD_COND_SHARED is an integer, we need to cast it to void * +- # to be able to compare it to the condvar's __data.__mutex member. +- # +- # While it looks like self.shared_value should be a class variable, +- # that would result in it having an incorrect size if we're loading +- # these printers through .gdbinit for a 64-bit objfile in AMD64. +- # This is because gdb initially assumes the pointer size to be 4 bytes, +- # and only sets it to 8 after loading the 64-bit objfiles. Since +- # .gdbinit runs before any objfiles are loaded, this would effectively +- # make self.shared_value have a size of 4, thus breaking later +- # comparisons with pointers whose types are looked up at runtime. 
+- void_ptr_type = gdb.lookup_type('void').pointer() +- self.shared_value = gdb.Value(PTHREAD_COND_SHARED).cast(void_ptr_type) +- + data = cond['__data'] +- self.total_seq = data['__total_seq'] +- self.mutex = data['__mutex'] +- self.nwaiters = data['__nwaiters'] ++ self.wrefs = data['__wrefs'] + self.values = [] + + self.read_values() +@@ -360,7 +334,6 @@ class ConditionVariablePrinter(object): + + self.read_status() + self.read_attributes() +- self.read_mutex_info() + + def read_status(self): + """Read the status of the condvar. +@@ -369,41 +342,22 @@ class ConditionVariablePrinter(object): + are waiting for it. + """ + +- if self.total_seq == PTHREAD_COND_DESTROYED: +- self.values.append(('Status', 'Destroyed')) +- +- self.values.append(('Threads waiting for this condvar', +- self.nwaiters >> COND_NWAITERS_SHIFT)) ++ self.values.append(('Threads known to still execute a wait function', ++ self.wrefs >> PTHREAD_COND_WREFS_SHIFT)) + + def read_attributes(self): + """Read the condvar's attributes.""" + +- clock_id = self.nwaiters & ((1 << COND_NWAITERS_SHIFT) - 1) +- +- # clock_id must be casted to int because it's a gdb.Value +- self.values.append(('Clock ID', CLOCK_IDS[int(clock_id)])) ++ if (self.wrefs & PTHREAD_COND_CLOCK_MONOTONIC_MASK) != 0: ++ self.values.append(('Clock ID', 'CLOCK_MONOTONIC')) ++ else: ++ self.values.append(('Clock ID', 'CLOCK_REALTIME')) + +- shared = (self.mutex == self.shared_value) +- +- if shared: ++ if (self.wrefs & PTHREAD_COND_SHARED_MASK) != 0: + self.values.append(('Shared', 'Yes')) + else: + self.values.append(('Shared', 'No')) + +- def read_mutex_info(self): +- """Read the data of the mutex this condvar is bound to. +- +- A pthread_cond_t's __data.__mutex member is a void * which +- must be casted to pthread_mutex_t *. For shared condvars, this +- member isn't recorded and has a special value instead. +- """ +- +- if self.mutex and self.mutex != self.shared_value: +- mutex_type = gdb.lookup_type('pthread_mutex_t') +- mutex = self.mutex.cast(mutex_type.pointer()).dereference() +- +- self.values.append(('Mutex', mutex)) +- + class ConditionVariableAttributesPrinter(object): + """Pretty printer for pthread_condattr_t. + +@@ -453,10 +407,12 @@ class ConditionVariableAttributesPrinter(object): + created in self.children. + """ + +- clock_id = self.condattr & ((1 << COND_NWAITERS_SHIFT) - 1) ++ clock_id = (self.condattr >> 1) & ((1 << COND_CLOCK_BITS) - 1) + +- # clock_id must be casted to int because it's a gdb.Value +- self.values.append(('Clock ID', CLOCK_IDS[int(clock_id)])) ++ if clock_id != 0: ++ self.values.append(('Clock ID', 'CLOCK_MONOTONIC')) ++ else: ++ self.values.append(('Clock ID', 'CLOCK_REALTIME')) + + if self.condattr & 1: + self.values.append(('Shared', 'Yes')) +diff --git a/nptl/nptl_lock_constants.pysym b/nptl/nptl_lock_constants.pysym +index 303ec61..2ab3179 100644 +--- a/nptl/nptl_lock_constants.pysym ++++ b/nptl/nptl_lock_constants.pysym +@@ -44,26 +44,13 @@ PTHREAD_PRIO_NONE + PTHREAD_PRIO_INHERIT + PTHREAD_PRIO_PROTECT + +--- These values are hardcoded as well: +--- Value of __mutex for shared condvars. +-PTHREAD_COND_SHARED (void *)~0l +- +--- Value of __total_seq for destroyed condvars. +-PTHREAD_COND_DESTROYED -1ull +- +--- __nwaiters encodes the number of threads waiting on a condvar +--- and the clock ID. +--- __nwaiters >> COND_NWAITERS_SHIFT gives us the number of waiters. 
+-COND_NWAITERS_SHIFT +- +--- Condvar clock IDs +-CLOCK_REALTIME +-CLOCK_MONOTONIC +-CLOCK_PROCESS_CPUTIME_ID +-CLOCK_THREAD_CPUTIME_ID +-CLOCK_MONOTONIC_RAW +-CLOCK_REALTIME_COARSE +-CLOCK_MONOTONIC_COARSE ++-- Condition variable ++-- FIXME Why do macros prefixed with __ cannot be used directly? ++PTHREAD_COND_SHARED_MASK __PTHREAD_COND_SHARED_MASK ++PTHREAD_COND_CLOCK_MONOTONIC_MASK __PTHREAD_COND_CLOCK_MONOTONIC_MASK ++COND_CLOCK_BITS ++-- These values are hardcoded: ++PTHREAD_COND_WREFS_SHIFT 3 + + -- Rwlock attributes + PTHREAD_RWLOCK_PREFER_READER_NP +diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h +index 4edc74b..e9992bc 100644 +--- a/nptl/pthreadP.h ++++ b/nptl/pthreadP.h +@@ -167,6 +167,13 @@ enum + #define __PTHREAD_ONCE_FORK_GEN_INCR 4 + + ++/* Condition variable definitions. See __pthread_cond_wait_common. ++ Need to be defined here so there is one place from which ++ nptl_lock_constants can grab them. */ ++#define __PTHREAD_COND_CLOCK_MONOTONIC_MASK 2 ++#define __PTHREAD_COND_SHARED_MASK 1 ++ ++ + /* Internal variables. */ + + +diff --git a/nptl/pthread_cond_broadcast.c b/nptl/pthread_cond_broadcast.c +index 552fd42..87c0755 100644 +--- a/nptl/pthread_cond_broadcast.c ++++ b/nptl/pthread_cond_broadcast.c +@@ -19,72 +19,71 @@ + #include <endian.h> + #include <errno.h> + #include <sysdep.h> +-#include <lowlevellock.h> ++#include <futex-internal.h> + #include <pthread.h> + #include <pthreadP.h> + #include <stap-probe.h> ++#include <atomic.h> + + #include <shlib-compat.h> +-#include <kernel-features.h> + ++#include "pthread_cond_common.c" + ++ ++/* We do the following steps from __pthread_cond_signal in one critical ++ section: (1) signal all waiters in G1, (2) close G1 so that it can become ++ the new G2 and make G2 the new G1, and (3) signal all waiters in the new ++ G1. We don't need to do all these steps if there are no waiters in G1 ++ and/or G2. See __pthread_cond_signal for further details. */ + int + __pthread_cond_broadcast (pthread_cond_t *cond) + { + LIBC_PROBE (cond_broadcast, 1, cond); + +- int pshared = (cond->__data.__mutex == (void *) ~0l) +- ? LLL_SHARED : LLL_PRIVATE; +- /* Make sure we are alone. */ +- lll_lock (cond->__data.__lock, pshared); ++ unsigned int wrefs = atomic_load_relaxed (&cond->__data.__wrefs); ++ if (wrefs >> 3 == 0) ++ return 0; ++ int private = __condvar_get_private (wrefs); ++ ++ __condvar_acquire_lock (cond, private); + +- /* Are there any waiters to be woken? */ +- if (cond->__data.__total_seq > cond->__data.__wakeup_seq) ++ unsigned long long int wseq = __condvar_load_wseq_relaxed (cond); ++ unsigned int g2 = wseq & 1; ++ unsigned int g1 = g2 ^ 1; ++ wseq >>= 1; ++ bool do_futex_wake = false; ++ ++ /* Step (1): signal all waiters remaining in G1. */ ++ if (cond->__data.__g_size[g1] != 0) + { +- /* Yes. Mark them all as woken. */ +- cond->__data.__wakeup_seq = cond->__data.__total_seq; +- cond->__data.__woken_seq = cond->__data.__total_seq; +- cond->__data.__futex = (unsigned int) cond->__data.__total_seq * 2; +- int futex_val = cond->__data.__futex; +- /* Signal that a broadcast happened. */ +- ++cond->__data.__broadcast_seq; +- +- /* We are done. */ +- lll_unlock (cond->__data.__lock, pshared); +- +- /* Wake everybody. */ +- pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex; +- +- /* Do not use requeue for pshared condvars. 
*/ +- if (mut == (void *) ~0l +- || PTHREAD_MUTEX_PSHARED (mut) & PTHREAD_MUTEX_PSHARED_BIT) +- goto wake_all; +- +-#if (defined lll_futex_cmp_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- if (USE_REQUEUE_PI (mut)) +- { +- if (lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, INT_MAX, +- &mut->__data.__lock, futex_val, +- LLL_PRIVATE) == 0) +- return 0; +- } +- else +-#endif +- /* lll_futex_requeue returns 0 for success and non-zero +- for errors. */ +- if (!__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1, +- INT_MAX, &mut->__data.__lock, +- futex_val, LLL_PRIVATE), 0)) +- return 0; +- +-wake_all: +- lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared); +- return 0; ++ /* Add as many signals as the remaining size of the group. */ ++ atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, ++ cond->__data.__g_size[g1] << 1); ++ cond->__data.__g_size[g1] = 0; ++ ++ /* We need to wake G1 waiters before we quiesce G1 below. */ ++ /* TODO Only set it if there are indeed futex waiters. We could ++ also try to move this out of the critical section in cases when ++ G2 is empty (and we don't need to quiesce). */ ++ futex_wake (cond->__data.__g_signals + g1, INT_MAX, private); + } + +- /* We are done. */ +- lll_unlock (cond->__data.__lock, pshared); ++ /* G1 is complete. Step (2) is next unless there are no waiters in G2, in ++ which case we can stop. */ ++ if (__condvar_quiesce_and_switch_g1 (cond, wseq, &g1, private)) ++ { ++ /* Step (3): Send signals to all waiters in the old G2 / new G1. */ ++ atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, ++ cond->__data.__g_size[g1] << 1); ++ cond->__data.__g_size[g1] = 0; ++ /* TODO Only set it if there are indeed futex waiters. */ ++ do_futex_wake = true; ++ } ++ ++ __condvar_release_lock (cond, private); ++ ++ if (do_futex_wake) ++ futex_wake (cond->__data.__g_signals + g1, INT_MAX, private); + + return 0; + } +diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c +new file mode 100644 +index 0000000..b374396 +--- /dev/null ++++ b/nptl/pthread_cond_common.c +@@ -0,0 +1,466 @@ ++/* pthread_cond_common -- shared code for condition variable. ++ Copyright (C) 2016 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, see ++ <http://www.gnu.org/licenses/>. */ ++ ++#include <atomic.h> ++#include <stdint.h> ++#include <pthread.h> ++#include <libc-internal.h> ++ ++/* We need 3 least-significant bits on __wrefs for something else. 
*/ ++#define __PTHREAD_COND_MAX_GROUP_SIZE ((unsigned) 1 << 29) ++ ++#if __HAVE_64B_ATOMICS == 1 ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_load_wseq_relaxed (pthread_cond_t *cond) ++{ ++ return atomic_load_relaxed (&cond->__data.__wseq); ++} ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val) ++{ ++ return atomic_fetch_add_acquire (&cond->__data.__wseq, val); ++} ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val) ++{ ++ return atomic_fetch_xor_release (&cond->__data.__wseq, val); ++} ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_load_g1_start_relaxed (pthread_cond_t *cond) ++{ ++ return atomic_load_relaxed (&cond->__data.__g1_start); ++} ++ ++static void __attribute__ ((unused)) ++__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val) ++{ ++ atomic_store_relaxed (&cond->__data.__g1_start, ++ atomic_load_relaxed (&cond->__data.__g1_start) + val); ++} ++ ++#else ++ ++/* We use two 64b counters: __wseq and __g1_start. They are monotonically ++ increasing and single-writer-multiple-readers counters, so we can implement ++ load, fetch-and-add, and fetch-and-xor operations even when we just have ++ 32b atomics. Values we add or xor are less than or equal to 1<<31 (*), ++ so we only have to make overflow-and-addition atomic wrt. to concurrent ++ load operations and xor operations. To do that, we split each counter into ++ two 32b values of which we reserve the MSB of each to represent an ++ overflow from the lower-order half to the higher-order half. ++ ++ In the common case, the state is (higher-order / lower-order half, and . is ++ basically concatenation of the bits): ++ 0.h / 0.l = h.l ++ ++ When we add a value of x that overflows (i.e., 0.l + x == 1.L), we run the ++ following steps S1-S4 (the values these represent are on the right-hand ++ side): ++ S1: 0.h / 1.L == (h+1).L ++ S2: 1.(h+1) / 1.L == (h+1).L ++ S3: 1.(h+1) / 0.L == (h+1).L ++ S4: 0.(h+1) / 0.L == (h+1).L ++ If the LSB of the higher-order half is set, readers will ignore the ++ overflow bit in the lower-order half. ++ ++ To get an atomic snapshot in load operations, we exploit that the ++ higher-order half is monotonically increasing; if we load a value V from ++ it, then read the lower-order half, and then read the higher-order half ++ again and see the same value V, we know that both halves have existed in ++ the sequence of values the full counter had. This is similar to the ++ validated reads in the time-based STMs in GCC's libitm (e.g., ++ method_ml_wt). ++ ++ The xor operation needs to be an atomic read-modify-write. The write ++ itself is not an issue as it affects just the lower-order half but not bits ++ used in the add operation. To make the full fetch-and-xor atomic, we ++ exploit that concurrently, the value can increase by at most 1<<31 (*): The ++ xor operation is only called while having acquired the lock, so not more ++ than __PTHREAD_COND_MAX_GROUP_SIZE waiters can enter concurrently and thus ++ increment __wseq. Therefore, if the xor operation observes a value of ++ __wseq, then the value it applies the modification to later on can be ++ derived (see below). ++ ++ One benefit of this scheme is that this makes load operations ++ obstruction-free because unlike if we would just lock the counter, readers ++ can almost always interpret a snapshot of each halves. 
Readers can be ++ forced to read a new snapshot when the read is concurrent with an overflow. ++ However, overflows will happen infrequently, so load operations are ++ practically lock-free. ++ ++ (*) The highest value we add is __PTHREAD_COND_MAX_GROUP_SIZE << 2 to ++ __g1_start (the two extra bits are for the lock in the two LSBs of ++ __g1_start). */ ++ ++typedef struct ++{ ++ unsigned int low; ++ unsigned int high; ++} _condvar_lohi; ++ ++static uint64_t ++__condvar_fetch_add_64_relaxed (_condvar_lohi *lh, unsigned int op) ++{ ++ /* S1. Note that this is an atomic read-modify-write so it extends the ++ release sequence of release MO store at S3. */ ++ unsigned int l = atomic_fetch_add_relaxed (&lh->low, op); ++ unsigned int h = atomic_load_relaxed (&lh->high); ++ uint64_t result = ((uint64_t) h << 31) | l; ++ l += op; ++ if ((l >> 31) > 0) ++ { ++ /* Overflow. Need to increment higher-order half. Note that all ++ add operations are ordered in happens-before. */ ++ h++; ++ /* S2. Release MO to synchronize with the loads of the higher-order half ++ in the load operation. See __condvar_load_64_relaxed. */ ++ atomic_store_release (&lh->high, h | ((unsigned int) 1 << 31)); ++ l ^= (unsigned int) 1 << 31; ++ /* S3. See __condvar_load_64_relaxed. */ ++ atomic_store_release (&lh->low, l); ++ /* S4. Likewise. */ ++ atomic_store_release (&lh->high, h); ++ } ++ return result; ++} ++ ++static uint64_t ++__condvar_load_64_relaxed (_condvar_lohi *lh) ++{ ++ unsigned int h, l, h2; ++ do ++ { ++ /* This load and the second one below to the same location read from the ++ stores in the overflow handling of the add operation or the ++ initializing stores (which is a simple special case because ++ initialization always completely happens before further use). ++ Because no two stores to the higher-order half write the same value, ++ the loop ensures that if we continue to use the snapshot, this load ++ and the second one read from the same store operation. All candidate ++ store operations have release MO. ++ If we read from S2 in the first load, then we will see the value of ++ S1 on the next load (because we synchronize with S2), or a value ++ later in modification order. We correctly ignore the lower-half's ++ overflow bit in this case. If we read from S4, then we will see the ++ value of S3 in the next load (or a later value), which does not have ++ the overflow bit set anymore. ++ */ ++ h = atomic_load_acquire (&lh->high); ++ /* This will read from the release sequence of S3 (i.e, either the S3 ++ store or the read-modify-writes at S1 following S3 in modification ++ order). Thus, the read synchronizes with S3, and the following load ++ of the higher-order half will read from the matching S2 (or a later ++ value). ++ Thus, if we read a lower-half value here that already overflowed and ++ belongs to an increased higher-order half value, we will see the ++ latter and h and h2 will not be equal. */ ++ l = atomic_load_acquire (&lh->low); ++ /* See above. 
*/ ++ h2 = atomic_load_relaxed (&lh->high); ++ } ++ while (h != h2); ++ if (((l >> 31) > 0) && ((h >> 31) > 0)) ++ l ^= (unsigned int) 1 << 31; ++ return ((uint64_t) (h & ~((unsigned int) 1 << 31)) << 31) + l; ++} ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_load_wseq_relaxed (pthread_cond_t *cond) ++{ ++ return __condvar_load_64_relaxed ((_condvar_lohi *) &cond->__data.__wseq32); ++} ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val) ++{ ++ uint64_t r = __condvar_fetch_add_64_relaxed ++ ((_condvar_lohi *) &cond->__data.__wseq32, val); ++ atomic_thread_fence_acquire (); ++ return r; ++} ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val) ++{ ++ _condvar_lohi *lh = (_condvar_lohi *) &cond->__data.__wseq32; ++ /* First, get the current value. See __condvar_load_64_relaxed. */ ++ unsigned int h, l, h2; ++ do ++ { ++ h = atomic_load_acquire (&lh->high); ++ l = atomic_load_acquire (&lh->low); ++ h2 = atomic_load_relaxed (&lh->high); ++ } ++ while (h != h2); ++ if (((l >> 31) > 0) && ((h >> 31) == 0)) ++ h++; ++ h &= ~((unsigned int) 1 << 31); ++ l &= ~((unsigned int) 1 << 31); ++ ++ /* Now modify. Due to the coherence rules, the prior load will read a value ++ earlier in modification order than the following fetch-xor. ++ This uses release MO to make the full operation have release semantics ++ (all other operations access the lower-order half). */ ++ unsigned int l2 = atomic_fetch_xor_release (&lh->low, val) ++ & ~((unsigned int) 1 << 31); ++ if (l2 < l) ++ /* The lower-order half overflowed in the meantime. This happened exactly ++ once due to the limit on concurrent waiters (see above). */ ++ h++; ++ return ((uint64_t) h << 31) + l2; ++} ++ ++static uint64_t __attribute__ ((unused)) ++__condvar_load_g1_start_relaxed (pthread_cond_t *cond) ++{ ++ return __condvar_load_64_relaxed ++ ((_condvar_lohi *) &cond->__data.__g1_start32); ++} ++ ++static void __attribute__ ((unused)) ++__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val) ++{ ++ ignore_value (__condvar_fetch_add_64_relaxed ++ ((_condvar_lohi *) &cond->__data.__g1_start32, val)); ++} ++ ++#endif /* !__HAVE_64B_ATOMICS */ ++ ++ ++/* The lock that signalers use. See pthread_cond_wait_common for uses. ++ The lock is our normal three-state lock: not acquired (0) / acquired (1) / ++ acquired-with-futex_wake-request (2). However, we need to preserve the ++ other bits in the unsigned int used for the lock, and therefore it is a ++ little more complex. */ ++static void __attribute__ ((unused)) ++__condvar_acquire_lock (pthread_cond_t *cond, int private) ++{ ++ unsigned int s = atomic_load_relaxed (&cond->__data.__g1_orig_size); ++ while ((s & 3) == 0) ++ { ++ if (atomic_compare_exchange_weak_acquire (&cond->__data.__g1_orig_size, ++ &s, s | 1)) ++ return; ++ /* TODO Spinning and back-off. */ ++ } ++ /* We can't change from not acquired to acquired, so try to change to ++ acquired-with-futex-wake-request and do a futex wait if we cannot change ++ from not acquired. */ ++ while (1) ++ { ++ while ((s & 3) != 2) ++ { ++ if (atomic_compare_exchange_weak_acquire ++ (&cond->__data.__g1_orig_size, &s, (s & ~(unsigned int) 3) | 2)) ++ { ++ if ((s & 3) == 0) ++ return; ++ break; ++ } ++ /* TODO Back off. */ ++ } ++ futex_wait_simple (&cond->__data.__g1_orig_size, ++ (s & ~(unsigned int) 3) | 2, private); ++ /* Reload so we see a recent value. 
*/ ++ s = atomic_load_relaxed (&cond->__data.__g1_orig_size); ++ } ++} ++ ++/* See __condvar_acquire_lock. */ ++static void __attribute__ ((unused)) ++__condvar_release_lock (pthread_cond_t *cond, int private) ++{ ++ if ((atomic_fetch_and_release (&cond->__data.__g1_orig_size, ++ ~(unsigned int) 3) & 3) ++ == 2) ++ futex_wake (&cond->__data.__g1_orig_size, 1, private); ++} ++ ++/* Only use this when having acquired the lock. */ ++static unsigned int __attribute__ ((unused)) ++__condvar_get_orig_size (pthread_cond_t *cond) ++{ ++ return atomic_load_relaxed (&cond->__data.__g1_orig_size) >> 2; ++} ++ ++/* Only use this when having acquired the lock. */ ++static void __attribute__ ((unused)) ++__condvar_set_orig_size (pthread_cond_t *cond, unsigned int size) ++{ ++ /* We have acquired the lock, but might get one concurrent update due to a ++ lock state change from acquired to acquired-with-futex_wake-request. ++ The store with relaxed MO is fine because there will be no further ++ changes to the lock bits nor the size, and we will subsequently release ++ the lock with release MO. */ ++ unsigned int s; ++ s = (atomic_load_relaxed (&cond->__data.__g1_orig_size) & 3) ++ | (size << 2); ++ if ((atomic_exchange_relaxed (&cond->__data.__g1_orig_size, s) & 3) ++ != (s & 3)) ++ atomic_store_relaxed (&cond->__data.__g1_orig_size, (size << 2) | 2); ++} ++ ++/* Returns FUTEX_SHARED or FUTEX_PRIVATE based on the provided __wrefs ++ value. */ ++static int __attribute__ ((unused)) ++__condvar_get_private (int flags) ++{ ++ if ((flags & __PTHREAD_COND_SHARED_MASK) == 0) ++ return FUTEX_PRIVATE; ++ else ++ return FUTEX_SHARED; ++} ++ ++/* This closes G1 (whose index is in G1INDEX), waits for all futex waiters to ++ leave G1, converts G1 into a fresh G2, and then switches group roles so that ++ the former G2 becomes the new G1 ending at the current __wseq value when we ++ eventually make the switch (WSEQ is just an observation of __wseq by the ++ signaler). ++ If G2 is empty, it will not switch groups because then it would create an ++ empty G1 which would require switching groups again on the next signal. ++ Returns false iff groups were not switched because G2 was empty. */ ++static bool __attribute__ ((unused)) ++__condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq, ++ unsigned int *g1index, int private) ++{ ++ const unsigned int maxspin = 0; ++ unsigned int g1 = *g1index; ++ ++ /* If there is no waiter in G2, we don't do anything. The expression may ++ look odd but remember that __g_size might hold a negative value, so ++ putting the expression this way avoids relying on implementation-defined ++ behavior. ++ Note that this works correctly for a zero-initialized condvar too. */ ++ unsigned int old_orig_size = __condvar_get_orig_size (cond); ++ uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1; ++ if (((unsigned) (wseq - old_g1_start - old_orig_size) ++ + cond->__data.__g_size[g1 ^ 1]) == 0) ++ return false; ++ ++ /* Now try to close and quiesce G1. We have to consider the following kinds ++ of waiters: ++ * Waiters from less recent groups than G1 are not affected because ++ nothing will change for them apart from __g1_start getting larger. ++ * New waiters arriving concurrently with the group switching will all go ++ into G2 until we atomically make the switch. Waiters existing in G2 ++ are not affected. 
++ * Waiters in G1 will be closed out immediately by setting a flag in ++ __g_signals, which will prevent waiters from blocking using a futex on ++ __g_signals and also notifies them that the group is closed. As a ++ result, they will eventually remove their group reference, allowing us ++ to close switch group roles. */ ++ ++ /* First, set the closed flag on __g_signals. This tells waiters that are ++ about to wait that they shouldn't do that anymore. This basically ++ serves as an advance notificaton of the upcoming change to __g1_start; ++ waiters interpret it as if __g1_start was larger than their waiter ++ sequence position. This allows us to change __g1_start after waiting ++ for all existing waiters with group references to leave, which in turn ++ makes recovery after stealing a signal simpler because it then can be ++ skipped if __g1_start indicates that the group is closed (otherwise, ++ we would have to recover always because waiters don't know how big their ++ groups are). Relaxed MO is fine. */ ++ atomic_fetch_or_relaxed (cond->__data.__g_signals + g1, 1); ++ ++ /* Wait until there are no group references anymore. The fetch-or operation ++ injects us into the modification order of __g_refs; release MO ensures ++ that waiters incrementing __g_refs after our fetch-or see the previous ++ changes to __g_signals and to __g1_start that had to happen before we can ++ switch this G1 and alias with an older group (we have two groups, so ++ aliasing requires switching group roles twice). Note that nobody else ++ can have set the wake-request flag, so we do not have to act upon it. ++ ++ Also note that it is harmless if older waiters or waiters from this G1 ++ get a group reference after we have quiesced the group because it will ++ remain closed for them either because of the closed flag in __g_signals ++ or the later update to __g1_start. New waiters will never arrive here ++ but instead continue to go into the still current G2. */ ++ unsigned r = atomic_fetch_or_release (cond->__data.__g_refs + g1, 0); ++ while ((r >> 1) > 0) ++ { ++ for (unsigned int spin = maxspin; ((r >> 1) > 0) && (spin > 0); spin--) ++ { ++ /* TODO Back off. */ ++ r = atomic_load_relaxed (cond->__data.__g_refs + g1); ++ } ++ if ((r >> 1) > 0) ++ { ++ /* There is still a waiter after spinning. Set the wake-request ++ flag and block. Relaxed MO is fine because this is just about ++ this futex word. */ ++ r = atomic_fetch_or_relaxed (cond->__data.__g_refs + g1, 1); ++ ++ if ((r >> 1) > 0) ++ futex_wait_simple (cond->__data.__g_refs + g1, r, private); ++ /* Reload here so we eventually see the most recent value even if we ++ do not spin. */ ++ r = atomic_load_relaxed (cond->__data.__g_refs + g1); ++ } ++ } ++ /* Acquire MO so that we synchronize with the release operation that waiters ++ use to decrement __g_refs and thus happen after the waiters we waited ++ for. */ ++ atomic_thread_fence_acquire (); ++ ++ /* Update __g1_start, which finishes closing this group. The value we add ++ will never be negative because old_orig_size can only be zero when we ++ switch groups the first time after a condvar was initialized, in which ++ case G1 will be at index 1 and we will add a value of 1. See above for ++ why this takes place after waiting for quiescence of the group. ++ Relaxed MO is fine because the change comes with no additional ++ constraints that others would have to observe. */ ++ __condvar_add_g1_start_relaxed (cond, ++ (old_orig_size << 1) + (g1 == 1 ? 
1 : - 1)); ++ ++ /* Now reopen the group, thus enabling waiters to again block using the ++ futex controlled by __g_signals. Release MO so that observers that see ++ no signals (and thus can block) also see the write __g1_start and thus ++ that this is now a new group (see __pthread_cond_wait_common for the ++ matching acquire MO loads). */ ++ atomic_store_release (cond->__data.__g_signals + g1, 0); ++ ++ /* At this point, the old G1 is now a valid new G2 (but not in use yet). ++ No old waiter can neither grab a signal nor acquire a reference without ++ noticing that __g1_start is larger. ++ We can now publish the group switch by flipping the G2 index in __wseq. ++ Release MO so that this synchronizes with the acquire MO operation ++ waiters use to obtain a position in the waiter sequence. */ ++ wseq = __condvar_fetch_xor_wseq_release (cond, 1) >> 1; ++ g1 ^= 1; ++ *g1index ^= 1; ++ ++ /* These values are just observed by signalers, and thus protected by the ++ lock. */ ++ unsigned int orig_size = wseq - (old_g1_start + old_orig_size); ++ __condvar_set_orig_size (cond, orig_size); ++ /* Use and addition to not loose track of cancellations in what was ++ previously G2. */ ++ cond->__data.__g_size[g1] += orig_size; ++ ++ /* The new G1's size may be zero because of cancellations during its time ++ as G2. If this happens, there are no waiters that have to receive a ++ signal, so we do not need to add any and return false. */ ++ if (cond->__data.__g_size[g1] == 0) ++ return false; ++ ++ return true; ++} +diff --git a/nptl/pthread_cond_destroy.c b/nptl/pthread_cond_destroy.c +index 1acd804..5845c6a 100644 +--- a/nptl/pthread_cond_destroy.c ++++ b/nptl/pthread_cond_destroy.c +@@ -20,66 +20,42 @@ + #include <shlib-compat.h> + #include "pthreadP.h" + #include <stap-probe.h> +- +- ++#include <atomic.h> ++#include <futex-internal.h> ++ ++#include "pthread_cond_common.c" ++ ++/* See __pthread_cond_wait for a high-level description of the algorithm. ++ ++ A correct program must make sure that no waiters are blocked on the condvar ++ when it is destroyed, and that there are no concurrent signals or ++ broadcasts. To wake waiters reliably, the program must signal or ++ broadcast while holding the mutex or after having held the mutex. It must ++ also ensure that no signal or broadcast are still pending to unblock ++ waiters; IOW, because waiters can wake up spuriously, the program must ++ effectively ensure that destruction happens after the execution of those ++ signal or broadcast calls. ++ Thus, we can assume that all waiters that are still accessing the condvar ++ have been woken. We wait until they have confirmed to have woken up by ++ decrementing __wrefs. */ + int + __pthread_cond_destroy (pthread_cond_t *cond) + { +- int pshared = (cond->__data.__mutex == (void *) ~0l) +- ? LLL_SHARED : LLL_PRIVATE; +- + LIBC_PROBE (cond_destroy, 1, cond); + +- /* Make sure we are alone. */ +- lll_lock (cond->__data.__lock, pshared); +- +- if (cond->__data.__total_seq > cond->__data.__wakeup_seq) +- { +- /* If there are still some waiters which have not been +- woken up, this is an application bug. */ +- lll_unlock (cond->__data.__lock, pshared); +- return EBUSY; +- } +- +- /* Tell pthread_cond_*wait that this condvar is being destroyed. */ +- cond->__data.__total_seq = -1ULL; +- +- /* If there are waiters which have been already signalled or +- broadcasted, but still are using the pthread_cond_t structure, +- pthread_cond_destroy needs to wait for them. 
*/ +- unsigned int nwaiters = cond->__data.__nwaiters; +- +- if (nwaiters >= (1 << COND_NWAITERS_SHIFT)) ++ /* Set the wake request flag. We could also spin, but destruction that is ++ concurrent with still-active waiters is probably neither common nor ++ performance critical. Acquire MO to synchronize with waiters confirming ++ that they finished. */ ++ unsigned int wrefs = atomic_fetch_or_acquire (&cond->__data.__wrefs, 4); ++ int private = __condvar_get_private (wrefs); ++ while (wrefs >> 3 != 0) + { +- /* Wake everybody on the associated mutex in case there are +- threads that have been requeued to it. +- Without this, pthread_cond_destroy could block potentially +- for a long time or forever, as it would depend on other +- thread's using the mutex. +- When all threads waiting on the mutex are woken up, pthread_cond_wait +- only waits for threads to acquire and release the internal +- condvar lock. */ +- if (cond->__data.__mutex != NULL +- && cond->__data.__mutex != (void *) ~0l) +- { +- pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex; +- lll_futex_wake (&mut->__data.__lock, INT_MAX, +- PTHREAD_MUTEX_PSHARED (mut)); +- } +- +- do +- { +- lll_unlock (cond->__data.__lock, pshared); +- +- lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared); +- +- lll_lock (cond->__data.__lock, pshared); +- +- nwaiters = cond->__data.__nwaiters; +- } +- while (nwaiters >= (1 << COND_NWAITERS_SHIFT)); ++ futex_wait_simple (&cond->__data.__wrefs, wrefs, private); ++ /* See above. */ ++ wrefs = atomic_load_acquire (&cond->__data.__wrefs); + } +- ++ /* The memory the condvar occupies can now be reused. */ + return 0; + } + versioned_symbol (libpthread, __pthread_cond_destroy, +diff --git a/nptl/pthread_cond_init.c b/nptl/pthread_cond_init.c +index 9023370..c1eac5f 100644 +--- a/nptl/pthread_cond_init.c ++++ b/nptl/pthread_cond_init.c +@@ -19,25 +19,29 @@ + #include <shlib-compat.h> + #include "pthreadP.h" + #include <stap-probe.h> ++#include <string.h> + + ++/* See __pthread_cond_wait for details. */ + int + __pthread_cond_init (pthread_cond_t *cond, const pthread_condattr_t *cond_attr) + { + struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr; + +- cond->__data.__lock = LLL_LOCK_INITIALIZER; +- cond->__data.__futex = 0; +- cond->__data.__nwaiters = (icond_attr != NULL +- ? ((icond_attr->value >> 1) +- & ((1 << COND_NWAITERS_SHIFT) - 1)) +- : CLOCK_REALTIME); +- cond->__data.__total_seq = 0; +- cond->__data.__wakeup_seq = 0; +- cond->__data.__woken_seq = 0; +- cond->__data.__mutex = (icond_attr == NULL || (icond_attr->value & 1) == 0 +- ? NULL : (void *) ~0l); +- cond->__data.__broadcast_seq = 0; ++ memset (cond, 0, sizeof (pthread_cond_t)); ++ ++ /* Update the pretty printers if the internal representation of icond_attr ++ is changed. */ ++ ++ /* Iff not equal to ~0l, this is a PTHREAD_PROCESS_PRIVATE condvar. */ ++ if (icond_attr != NULL && (icond_attr->value & 1) != 0) ++ cond->__data.__wrefs |= __PTHREAD_COND_SHARED_MASK; ++ int clockid = (icond_attr != NULL ++ ? ((icond_attr->value >> 1) & ((1 << COND_CLOCK_BITS) - 1)) ++ : CLOCK_REALTIME); ++ /* If 0, CLOCK_REALTIME is used; CLOCK_MONOTONIC otherwise. 
*/ ++ if (clockid != CLOCK_REALTIME) ++ cond->__data.__wrefs |= __PTHREAD_COND_CLOCK_MONOTONIC_MASK; + + LIBC_PROBE (cond_init, 2, cond, cond_attr); + +diff --git a/nptl/pthread_cond_signal.c b/nptl/pthread_cond_signal.c +index b3a6d3d..a95d569 100644 +--- a/nptl/pthread_cond_signal.c ++++ b/nptl/pthread_cond_signal.c +@@ -19,62 +19,79 @@ + #include <endian.h> + #include <errno.h> + #include <sysdep.h> +-#include <lowlevellock.h> ++#include <futex-internal.h> + #include <pthread.h> + #include <pthreadP.h> ++#include <atomic.h> ++#include <stdint.h> + + #include <shlib-compat.h> +-#include <kernel-features.h> + #include <stap-probe.h> + ++#include "pthread_cond_common.c" + ++/* See __pthread_cond_wait for a high-level description of the algorithm. */ + int + __pthread_cond_signal (pthread_cond_t *cond) + { +- int pshared = (cond->__data.__mutex == (void *) ~0l) +- ? LLL_SHARED : LLL_PRIVATE; +- + LIBC_PROBE (cond_signal, 1, cond); + +- /* Make sure we are alone. */ +- lll_lock (cond->__data.__lock, pshared); +- +- /* Are there any waiters to be woken? */ +- if (cond->__data.__total_seq > cond->__data.__wakeup_seq) ++ /* First check whether there are waiters. Relaxed MO is fine for that for ++ the same reasons that relaxed MO is fine when observing __wseq (see ++ below). */ ++ unsigned int wrefs = atomic_load_relaxed (&cond->__data.__wrefs); ++ if (wrefs >> 3 == 0) ++ return 0; ++ int private = __condvar_get_private (wrefs); ++ ++ __condvar_acquire_lock (cond, private); ++ ++ /* Load the waiter sequence number, which represents our relative ordering ++ to any waiters. Relaxed MO is sufficient for that because: ++ 1) We can pick any position that is allowed by external happens-before ++ constraints. In particular, if another __pthread_cond_wait call ++ happened before us, this waiter must be eligible for being woken by ++ us. The only way do establish such a happens-before is by signaling ++ while having acquired the mutex associated with the condvar and ++ ensuring that the signal's critical section happens after the waiter. ++ Thus, the mutex ensures that we see that waiter's __wseq increase. ++ 2) Once we pick a position, we do not need to communicate this to the ++ program via a happens-before that we set up: First, any wake-up could ++ be a spurious wake-up, so the program must not interpret a wake-up as ++ an indication that the waiter happened before a particular signal; ++ second, a program cannot detect whether a waiter has not yet been ++ woken (i.e., it cannot distinguish between a non-woken waiter and one ++ that has been woken but hasn't resumed execution yet), and thus it ++ cannot try to deduce that a signal happened before a particular ++ waiter. */ ++ unsigned long long int wseq = __condvar_load_wseq_relaxed (cond); ++ unsigned int g1 = (wseq & 1) ^ 1; ++ wseq >>= 1; ++ bool do_futex_wake = false; ++ ++ /* If G1 is still receiving signals, we put the signal there. If not, we ++ check if G2 has waiters, and if so, quiesce and switch G1 to the former ++ G2; if this results in a new G1 with waiters (G2 might have cancellations ++ already, see __condvar_quiesce_and_switch_g1), we put the signal in the ++ new G1. */ ++ if ((cond->__data.__g_size[g1] != 0) ++ || __condvar_quiesce_and_switch_g1 (cond, wseq, &g1, private)) + { +- /* Yes. Mark one of them as woken. 
*/ +- ++cond->__data.__wakeup_seq; +- ++cond->__data.__futex; +- +-#if (defined lll_futex_cmp_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- pthread_mutex_t *mut = cond->__data.__mutex; +- +- if (USE_REQUEUE_PI (mut) +- /* This can only really fail with a ENOSYS, since nobody can modify +- futex while we have the cond_lock. */ +- && lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, 0, +- &mut->__data.__lock, +- cond->__data.__futex, pshared) == 0) +- { +- lll_unlock (cond->__data.__lock, pshared); +- return 0; +- } +- else +-#endif +- /* Wake one. */ +- if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, +- 1, 1, +- &cond->__data.__lock, +- pshared), 0)) +- return 0; +- +- /* Fallback if neither of them work. */ +- lll_futex_wake (&cond->__data.__futex, 1, pshared); ++ /* Add a signal. Relaxed MO is fine because signaling does not need to ++ establish a happens-before relation (see above). We do not mask the ++ release-MO store when initializing a group in ++ __condvar_quiesce_and_switch_g1 because we use an atomic ++ read-modify-write and thus extend that store's release sequence. */ ++ atomic_fetch_add_relaxed (cond->__data.__g_signals + g1, 2); ++ cond->__data.__g_size[g1]--; ++ /* TODO Only set it if there are indeed futex waiters. */ ++ do_futex_wake = true; + } + +- /* We are done. */ +- lll_unlock (cond->__data.__lock, pshared); ++ __condvar_release_lock (cond, private); ++ ++ if (do_futex_wake) ++ futex_wake (cond->__data.__g_signals + g1, 1, private); + + return 0; + } +diff --git a/nptl/pthread_cond_timedwait.c b/nptl/pthread_cond_timedwait.c +deleted file mode 100644 +index 711a51d..0000000 +--- a/nptl/pthread_cond_timedwait.c ++++ /dev/null +@@ -1,268 +0,0 @@ +-/* Copyright (C) 2003-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Martin Schwidefsky <schwidefsky@de.ibm.com>, 2003. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. */ +- +-#include <endian.h> +-#include <errno.h> +-#include <sysdep.h> +-#include <lowlevellock.h> +-#include <pthread.h> +-#include <pthreadP.h> +-#include <sys/time.h> +-#include <kernel-features.h> +- +-#include <shlib-compat.h> +- +-#ifndef HAVE_CLOCK_GETTIME_VSYSCALL +-# undef INTERNAL_VSYSCALL +-# define INTERNAL_VSYSCALL INTERNAL_SYSCALL +-# undef INLINE_VSYSCALL +-# define INLINE_VSYSCALL INLINE_SYSCALL +-#else +-# include <libc-vdso.h> +-#endif +- +-/* Cleanup handler, defined in pthread_cond_wait.c. 
*/ +-extern void __condvar_cleanup (void *arg) +- __attribute__ ((visibility ("hidden"))); +- +-struct _condvar_cleanup_buffer +-{ +- int oldtype; +- pthread_cond_t *cond; +- pthread_mutex_t *mutex; +- unsigned int bc_seq; +-}; +- +-int +-__pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, +- const struct timespec *abstime) +-{ +- struct _pthread_cleanup_buffer buffer; +- struct _condvar_cleanup_buffer cbuffer; +- int result = 0; +- +- /* Catch invalid parameters. */ +- if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) +- return EINVAL; +- +- int pshared = (cond->__data.__mutex == (void *) ~0l) +- ? LLL_SHARED : LLL_PRIVATE; +- +-#if (defined lll_futex_timed_wait_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- int pi_flag = 0; +-#endif +- +- /* Make sure we are alone. */ +- lll_lock (cond->__data.__lock, pshared); +- +- /* Now we can release the mutex. */ +- int err = __pthread_mutex_unlock_usercnt (mutex, 0); +- if (err) +- { +- lll_unlock (cond->__data.__lock, pshared); +- return err; +- } +- +- /* We have one new user of the condvar. */ +- ++cond->__data.__total_seq; +- ++cond->__data.__futex; +- cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT; +- +- /* Work around the fact that the kernel rejects negative timeout values +- despite them being valid. */ +- if (__glibc_unlikely (abstime->tv_sec < 0)) +- goto timeout; +- +- /* Remember the mutex we are using here. If there is already a +- different address store this is a bad user bug. Do not store +- anything for pshared condvars. */ +- if (cond->__data.__mutex != (void *) ~0l) +- cond->__data.__mutex = mutex; +- +- /* Prepare structure passed to cancellation handler. */ +- cbuffer.cond = cond; +- cbuffer.mutex = mutex; +- +- /* Before we block we enable cancellation. Therefore we have to +- install a cancellation handler. */ +- __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer); +- +- /* The current values of the wakeup counter. The "woken" counter +- must exceed this value. */ +- unsigned long long int val; +- unsigned long long int seq; +- val = seq = cond->__data.__wakeup_seq; +- /* Remember the broadcast counter. */ +- cbuffer.bc_seq = cond->__data.__broadcast_seq; +- +- while (1) +- { +-#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \ +- || !defined lll_futex_timed_wait_bitset) +- struct timespec rt; +- { +-# ifdef __NR_clock_gettime +- INTERNAL_SYSCALL_DECL (err); +- (void) INTERNAL_VSYSCALL (clock_gettime, err, 2, +- (cond->__data.__nwaiters +- & ((1 << COND_NWAITERS_SHIFT) - 1)), +- &rt); +- /* Convert the absolute timeout value to a relative timeout. */ +- rt.tv_sec = abstime->tv_sec - rt.tv_sec; +- rt.tv_nsec = abstime->tv_nsec - rt.tv_nsec; +-# else +- /* Get the current time. So far we support only one clock. */ +- struct timeval tv; +- (void) __gettimeofday (&tv, NULL); +- +- /* Convert the absolute timeout value to a relative timeout. */ +- rt.tv_sec = abstime->tv_sec - tv.tv_sec; +- rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000; +-# endif +- } +- if (rt.tv_nsec < 0) +- { +- rt.tv_nsec += 1000000000; +- --rt.tv_sec; +- } +- /* Did we already time out? */ +- if (__glibc_unlikely (rt.tv_sec < 0)) +- { +- if (cbuffer.bc_seq != cond->__data.__broadcast_seq) +- goto bc_out; +- +- goto timeout; +- } +-#endif +- +- unsigned int futex_val = cond->__data.__futex; +- +- /* Prepare to wait. Release the condvar futex. */ +- lll_unlock (cond->__data.__lock, pshared); +- +- /* Enable asynchronous cancellation. Required by the standard. 
*/ +- cbuffer.oldtype = __pthread_enable_asynccancel (); +- +-/* REQUEUE_PI was implemented after FUTEX_CLOCK_REALTIME, so it is sufficient +- to check just the former. */ +-#if (defined lll_futex_timed_wait_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- /* If pi_flag remained 1 then it means that we had the lock and the mutex +- but a spurious waker raced ahead of us. Give back the mutex before +- going into wait again. */ +- if (pi_flag) +- { +- __pthread_mutex_cond_lock_adjust (mutex); +- __pthread_mutex_unlock_usercnt (mutex, 0); +- } +- pi_flag = USE_REQUEUE_PI (mutex); +- +- if (pi_flag) +- { +- unsigned int clockbit = (cond->__data.__nwaiters & 1 +- ? 0 : FUTEX_CLOCK_REALTIME); +- err = lll_futex_timed_wait_requeue_pi (&cond->__data.__futex, +- futex_val, abstime, clockbit, +- &mutex->__data.__lock, +- pshared); +- pi_flag = (err == 0); +- } +- else +-#endif +- +- { +-#if (!defined __ASSUME_FUTEX_CLOCK_REALTIME \ +- || !defined lll_futex_timed_wait_bitset) +- /* Wait until woken by signal or broadcast. */ +- err = lll_futex_timed_wait (&cond->__data.__futex, +- futex_val, &rt, pshared); +-#else +- unsigned int clockbit = (cond->__data.__nwaiters & 1 +- ? 0 : FUTEX_CLOCK_REALTIME); +- err = lll_futex_timed_wait_bitset (&cond->__data.__futex, futex_val, +- abstime, clockbit, pshared); +-#endif +- } +- +- /* Disable asynchronous cancellation. */ +- __pthread_disable_asynccancel (cbuffer.oldtype); +- +- /* We are going to look at shared data again, so get the lock. */ +- lll_lock (cond->__data.__lock, pshared); +- +- /* If a broadcast happened, we are done. */ +- if (cbuffer.bc_seq != cond->__data.__broadcast_seq) +- goto bc_out; +- +- /* Check whether we are eligible for wakeup. */ +- val = cond->__data.__wakeup_seq; +- if (val != seq && cond->__data.__woken_seq != val) +- break; +- +- /* Not woken yet. Maybe the time expired? */ +- if (__glibc_unlikely (err == -ETIMEDOUT)) +- { +- timeout: +- /* Yep. Adjust the counters. */ +- ++cond->__data.__wakeup_seq; +- ++cond->__data.__futex; +- +- /* The error value. */ +- result = ETIMEDOUT; +- break; +- } +- } +- +- /* Another thread woken up. */ +- ++cond->__data.__woken_seq; +- +- bc_out: +- +- cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT; +- +- /* If pthread_cond_destroy was called on this variable already, +- notify the pthread_cond_destroy caller all waiters have left +- and it can be successfully destroyed. */ +- if (cond->__data.__total_seq == -1ULL +- && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT)) +- lll_futex_wake (&cond->__data.__nwaiters, 1, pshared); +- +- /* We are done with the condvar. */ +- lll_unlock (cond->__data.__lock, pshared); +- +- /* The cancellation handling is back to normal, remove the handler. */ +- __pthread_cleanup_pop (&buffer, 0); +- +- /* Get the mutex before returning. 
*/ +-#if (defined lll_futex_timed_wait_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- if (pi_flag) +- { +- __pthread_mutex_cond_lock_adjust (mutex); +- err = 0; +- } +- else +-#endif +- err = __pthread_mutex_cond_lock (mutex); +- +- return err ?: result; +-} +- +-versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, +- GLIBC_2_3_2); +diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c +index 3f62acc..2b43402 100644 +--- a/nptl/pthread_cond_wait.c ++++ b/nptl/pthread_cond_wait.c +@@ -19,219 +19,655 @@ + #include <endian.h> + #include <errno.h> + #include <sysdep.h> +-#include <lowlevellock.h> ++#include <futex-internal.h> + #include <pthread.h> + #include <pthreadP.h> +-#include <kernel-features.h> ++#include <sys/time.h> ++#include <atomic.h> ++#include <stdint.h> ++#include <stdbool.h> + + #include <shlib-compat.h> + #include <stap-probe.h> ++#include <time.h> ++ ++#include "pthread_cond_common.c" ++ + + struct _condvar_cleanup_buffer + { +- int oldtype; ++ uint64_t wseq; + pthread_cond_t *cond; + pthread_mutex_t *mutex; +- unsigned int bc_seq; ++ int private; + }; + + +-void +-__attribute__ ((visibility ("hidden"))) +-__condvar_cleanup (void *arg) ++/* Decrease the waiter reference count. */ ++static void ++__condvar_confirm_wakeup (pthread_cond_t *cond, int private) + { +- struct _condvar_cleanup_buffer *cbuffer = +- (struct _condvar_cleanup_buffer *) arg; +- unsigned int destroying; +- int pshared = (cbuffer->cond->__data.__mutex == (void *) ~0l) +- ? LLL_SHARED : LLL_PRIVATE; ++ /* If destruction is pending (i.e., the wake-request flag is nonzero) and we ++ are the last waiter (prior value of __wrefs was 1 << 3), then wake any ++ threads waiting in pthread_cond_destroy. Release MO to synchronize with ++ these threads. Don't bother clearing the wake-up request flag. */ ++ if ((atomic_fetch_add_release (&cond->__data.__wrefs, -8) >> 2) == 3) ++ futex_wake (&cond->__data.__wrefs, INT_MAX, private); ++} ++ + +- /* We are going to modify shared data. */ +- lll_lock (cbuffer->cond->__data.__lock, pshared); ++/* Cancel waiting after having registered as a waiter previously. SEQ is our ++ position and G is our group index. ++ The goal of cancellation is to make our group smaller if that is still ++ possible. If we are in a closed group, this is not possible anymore; in ++ this case, we need to send a replacement signal for the one we effectively ++ consumed because the signal should have gotten consumed by another waiter ++ instead; we must not both cancel waiting and consume a signal. ++ ++ Must not be called while still holding a reference on the group. ++ ++ Returns true iff we consumed a signal. ++ ++ On some kind of timeouts, we may be able to pretend that a signal we ++ effectively consumed happened before the timeout (i.e., similarly to first ++ spinning on signals before actually checking whether the timeout has ++ passed already). Doing this would allow us to skip sending a replacement ++ signal, but this case might happen rarely because the end of the timeout ++ must race with someone else sending a signal. Therefore, we don't bother ++ trying to optimize this. */ ++static void ++__condvar_cancel_waiting (pthread_cond_t *cond, uint64_t seq, unsigned int g, ++ int private) ++{ ++ bool consumed_signal = false; + +- if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq) ++ /* No deadlock with group switching is possible here because we have do ++ not hold a reference on the group. 
*/ ++ __condvar_acquire_lock (cond, private); ++ ++ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond) >> 1; ++ if (g1_start > seq) ++ { ++ /* Our group is closed, so someone provided enough signals for it. ++ Thus, we effectively consumed a signal. */ ++ consumed_signal = true; ++ } ++ else + { +- /* This thread is not waiting anymore. Adjust the sequence counters +- appropriately. We do not increment WAKEUP_SEQ if this would +- bump it over the value of TOTAL_SEQ. This can happen if a thread +- was woken and then canceled. */ +- if (cbuffer->cond->__data.__wakeup_seq +- < cbuffer->cond->__data.__total_seq) ++ if (g1_start + __condvar_get_orig_size (cond) <= seq) ++ { ++ /* We are in the current G2 and thus cannot have consumed a signal. ++ Reduce its effective size or handle overflow. Remember that in ++ G2, unsigned int size is zero or a negative value. */ ++ if (cond->__data.__g_size[g] + __PTHREAD_COND_MAX_GROUP_SIZE > 0) ++ { ++ cond->__data.__g_size[g]--; ++ } ++ else ++ { ++ /* Cancellations would overflow the maximum group size. Just ++ wake up everyone spuriously to create a clean state. This ++ also means we do not consume a signal someone else sent. */ ++ __condvar_release_lock (cond, private); ++ __pthread_cond_broadcast (cond); ++ return; ++ } ++ } ++ else + { +- ++cbuffer->cond->__data.__wakeup_seq; +- ++cbuffer->cond->__data.__futex; ++ /* We are in current G1. If the group's size is zero, someone put ++ a signal in the group that nobody else but us can consume. */ ++ if (cond->__data.__g_size[g] == 0) ++ consumed_signal = true; ++ else ++ { ++ /* Otherwise, we decrease the size of the group. This is ++ equivalent to atomically putting in a signal just for us and ++ consuming it right away. We do not consume a signal sent ++ by someone else. We also cannot have consumed a futex ++ wake-up because if we were cancelled or timed out in a futex ++ call, the futex will wake another waiter. */ ++ cond->__data.__g_size[g]--; ++ } + } +- ++cbuffer->cond->__data.__woken_seq; + } + +- cbuffer->cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT; ++ __condvar_release_lock (cond, private); + +- /* If pthread_cond_destroy was called on this variable already, +- notify the pthread_cond_destroy caller all waiters have left +- and it can be successfully destroyed. */ +- destroying = 0; +- if (cbuffer->cond->__data.__total_seq == -1ULL +- && cbuffer->cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT)) ++ if (consumed_signal) + { +- lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1, pshared); +- destroying = 1; ++ /* We effectively consumed a signal even though we didn't want to. ++ Therefore, we need to send a replacement signal. ++ If we would want to optimize this, we could do what ++ pthread_cond_signal does right in the critical section above. */ ++ __pthread_cond_signal (cond); + } ++} + +- /* We are done. */ +- lll_unlock (cbuffer->cond->__data.__lock, pshared); +- +- /* Wake everybody to make sure no condvar signal gets lost. */ +- if (! destroying) +- lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared); +- +- /* Get the mutex before returning unless asynchronous cancellation +- is in effect. We don't try to get the mutex if we already own it. */ +- if (!(USE_REQUEUE_PI (cbuffer->mutex)) +- || ((cbuffer->mutex->__data.__lock & FUTEX_TID_MASK) +- != THREAD_GETMEM (THREAD_SELF, tid))) +- { +- __pthread_mutex_cond_lock (cbuffer->mutex); +- } +- else +- __pthread_mutex_cond_lock_adjust (cbuffer->mutex); ++/* Wake up any signalers that might be waiting. 
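__condvar_cancel_waiting above undoes a waiter's registration when it is cancelled or times out, and the cancellation clean-up path (__condvar_cleanup_waiting, below) re-acquires the mutex before caller-installed cleanup handlers run. A hedged sketch of the usual caller-side pattern, reusing the hypothetical lock, cond and ready globals from the sketch after the pthread_cond_signal hunk:

  #include <pthread.h>

  static void unlock_mutex (void *m)
  {
    pthread_mutex_unlock (m);
  }

  /* Cancellable wait: pthread_cond_wait is a cancellation point, and on
     cancellation the mutex has already been re-acquired by the time this
     handler runs, so the handler has to release it.  */
  void *cancellable_worker (void *arg)
  {
    pthread_mutex_lock (&lock);
    pthread_cleanup_push (unlock_mutex, &lock);
    while (!ready)
      pthread_cond_wait (&cond, &lock);
    pthread_cleanup_pop (1);   /* Runs unlock_mutex.  */
    return arg;
  }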
*/ ++static void ++__condvar_dec_grefs (pthread_cond_t *cond, unsigned int g, int private) ++{ ++ /* Release MO to synchronize-with the acquire load in ++ __condvar_quiesce_and_switch_g1. */ ++ if (atomic_fetch_add_release (cond->__data.__g_refs + g, -2) == 3) ++ { ++ /* Clear the wake-up request flag before waking up. We do not need more ++ than relaxed MO and it doesn't matter if we apply this for an aliased ++ group because we wake all futex waiters right after clearing the ++ flag. */ ++ atomic_fetch_and_relaxed (cond->__data.__g_refs + g, ~(unsigned int) 1); ++ futex_wake (cond->__data.__g_refs + g, INT_MAX, private); ++ } + } + ++/* Clean-up for cancellation of waiters waiting for normal signals. We cancel ++ our registration as a waiter, confirm we have woken up, and re-acquire the ++ mutex. */ ++static void ++__condvar_cleanup_waiting (void *arg) ++{ ++ struct _condvar_cleanup_buffer *cbuffer = ++ (struct _condvar_cleanup_buffer *) arg; ++ pthread_cond_t *cond = cbuffer->cond; ++ unsigned g = cbuffer->wseq & 1; + +-int +-__pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) ++ __condvar_dec_grefs (cond, g, cbuffer->private); ++ ++ __condvar_cancel_waiting (cond, cbuffer->wseq >> 1, g, cbuffer->private); ++ /* FIXME With the current cancellation implementation, it is possible that ++ a thread is cancelled after it has returned from a syscall. This could ++ result in a cancelled waiter consuming a futex wake-up that is then ++ causing another waiter in the same group to not wake up. To work around ++ this issue until we have fixed cancellation, just add a futex wake-up ++ conservatively. */ ++ futex_wake (cond->__data.__g_signals + g, 1, cbuffer->private); ++ ++ __condvar_confirm_wakeup (cond, cbuffer->private); ++ ++ /* XXX If locking the mutex fails, should we just stop execution? This ++ might be better than silently ignoring the error. */ ++ __pthread_mutex_cond_lock (cbuffer->mutex); ++} ++ ++/* This condvar implementation guarantees that all calls to signal and ++ broadcast and all of the three virtually atomic parts of each call to wait ++ (i.e., (1) releasing the mutex and blocking, (2) unblocking, and (3) re- ++ acquiring the mutex) happen in some total order that is consistent with the ++ happens-before relations in the calling program. However, this order does ++ not necessarily result in additional happens-before relations being ++ established (which aligns well with spurious wake-ups being allowed). ++ ++ All waiters acquire a certain position in a 64b waiter sequence (__wseq). ++ This sequence determines which waiters are allowed to consume signals. ++ A broadcast is equal to sending as many signals as are unblocked waiters. ++ When a signal arrives, it samples the current value of __wseq with a ++ relaxed-MO load (i.e., the position the next waiter would get). (This is ++ sufficient because it is consistent with happens-before; the caller can ++ enforce stronger ordering constraints by calling signal while holding the ++ mutex.) Only waiters with a position less than the __wseq value observed ++ by the signal are eligible to consume this signal. ++ ++ This would be straight-forward to implement if waiters would just spin but ++ we need to let them block using futexes. Futexes give no guarantee of ++ waking in FIFO order, so we cannot reliably wake eligible waiters if we ++ just use a single futex. 
Also, futex words are 32b in size, but we need
++   to distinguish more than 1<<32 states because we need to represent the
++   order of wake-up (and thus which waiters are eligible to consume signals);
++   blocking in a futex is not atomic with a waiter determining its position in
++   the waiter sequence, so we need the futex word to reliably notify waiters
++   that they should not attempt to block anymore because they have been
++   already signaled in the meantime.  While an ABA issue on a 32b value will
++   be rare, ignoring it when we are aware of it is not the right thing to do
++   either.
++
++   Therefore, we use a 64b counter to represent the waiter sequence (on
++   architectures which only support 32b atomics, we use a few bits less).
++   To deal with the blocking using futexes, we maintain two groups of waiters:
++   * Group G1 consists of waiters that are all eligible to consume signals;
++     incoming signals will always signal waiters in this group until all
++     waiters in G1 have been signaled.
++   * Group G2 consists of waiters that arrive when a G1 is present and still
++     contains waiters that have not been signaled.  When all waiters in G1
++     are signaled and a new signal arrives, the new signal will convert G2
++     into the new G1 and create a new G2 for future waiters.
++
++   We cannot allocate new memory because of process-shared condvars, so we
++   have just two slots of groups that change their role between G1 and G2.
++   Each has a separate futex word, a number of signals available for
++   consumption, a size (number of waiters in the group that have not been
++   signaled), and a reference count.
++
++   The group reference count is used to maintain the number of waiters that
++   are using the group's futex.  Before a group can change its role, the
++   reference count must show that no waiters are using the futex anymore; this
++   prevents ABA issues on the futex word.
++
++   To represent which intervals in the waiter sequence the groups cover (and
++   thus also which group slot contains G1 or G2), we use a 64b counter to
++   designate the start position of G1 (inclusive), and a single bit in the
++   waiter sequence counter to represent which group slot currently contains
++   G2.  This allows us to switch group roles atomically wrt. waiters obtaining
++   a position in the waiter sequence.  The G1 start position allows waiters to
++   figure out whether they are in a group that has already been completely
++   signaled (i.e., if the current G1 starts at a later position than the
++   waiter's position).  Waiters cannot determine whether they are currently
++   in G2 or G1 -- but they do not have to because all they are interested in
++   is whether there are available signals, and they always start in G2 (whose
++   group slot they know because of the bit in the waiter sequence).  Signalers
++   will simply fill the right group until it is completely signaled and can
++   be closed (they do not switch group roles until they really have to, to
++   decrease the likelihood of having to wait for waiters still holding a
++   reference on the now-closed G1).
++
++   Signalers maintain the initial size of G1 to be able to determine where
++   G2 starts (G2 is always open-ended until it becomes G1).  They track the
++   remaining size of a group; when waiters cancel waiting (due to PThreads
++   cancellation or timeouts), they will decrease this remaining size as well. 
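Earlier in this comment a broadcast is described as sending as many signals as there are waiters to wake, and spurious wake-ups are explicitly allowed. A minimal sketch of the usage pattern that relies on both properties (illustrative only, not part of this patch; gen_lock, gen_cond and generation are hypothetical names):

  #include <pthread.h>

  static pthread_mutex_t gen_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t gen_cond = PTHREAD_COND_INITIALIZER;
  static unsigned int generation;

  /* Publish a new generation and wake every waiter registered so far.  */
  void next_generation (void)
  {
    pthread_mutex_lock (&gen_lock);
    ++generation;
    pthread_cond_broadcast (&gen_cond);
    pthread_mutex_unlock (&gen_lock);
  }

  /* Block until the generation changes; the predicate loop absorbs any
     spurious wake-ups the implementation may produce.  */
  void wait_for_next_generation (void)
  {
    pthread_mutex_lock (&gen_lock);
    unsigned int seen = generation;
    while (generation == seen)
      pthread_cond_wait (&gen_cond, &gen_lock);
    pthread_mutex_unlock (&gen_lock);
  }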
++
++   To implement condvar destruction requirements (i.e., that
++   pthread_cond_destroy can be called as soon as all waiters have been
++   signaled), waiters increment a reference count before starting to wait and
++   decrement it after they stopped waiting but right before they acquire the
++   mutex associated with the condvar.
++
++   pthread_cond_t thus consists of the following (bits that are used for
++   flags and are not part of the primary value of each field but necessary
++   to make some things atomic or because there was no space for them
++   elsewhere in the data structure):
++
++   __wseq: Waiter sequence counter
++     * LSB is index of current G2.
++     * Waiters fetch-add while having acquired the mutex associated with the
++       condvar.  Signalers load it and fetch-xor it concurrently.
++   __g1_start: Starting position of G1 (inclusive)
++     * LSB is index of current G2.
++     * Modified by signalers while having acquired the condvar-internal lock
++       and observed concurrently by waiters.
++   __g1_orig_size: Initial size of G1
++     * The two least-significant bits represent the condvar-internal lock.
++     * Only accessed while having acquired the condvar-internal lock.
++   __wrefs: Waiter reference counter.
++     * Bit 2 is true if waiters should run futex_wake when they remove the
++       last reference.  pthread_cond_destroy uses this as futex word.
++     * Bit 1 is the clock ID (0 == CLOCK_REALTIME, 1 == CLOCK_MONOTONIC).
++     * Bit 0 is true iff this is a process-shared condvar.
++     * Simple reference count used by both waiters and pthread_cond_destroy.
++     (If the format of __wrefs is changed, update nptl_lock_constants.pysym
++      and the pretty printers.)
++   For each of the two groups, we have:
++   __g_refs: Futex waiter reference count.
++     * LSB is true if waiters should run futex_wake when they remove the
++       last reference.
++     * Reference count used by waiters concurrently with signalers that have
++       acquired the condvar-internal lock.
++   __g_signals: The number of signals that can still be consumed.
++     * Used as a futex word by waiters.  Used concurrently by waiters and
++       signalers.
++     * LSB is true iff this group has been completely signaled (i.e., it is
++       closed).
++   __g_size: Waiters remaining in this group (i.e., which have not been
++     signaled yet).
++     * Accessed by signalers and waiters that cancel waiting (both do so only
++       when having acquired the condvar-internal lock).
++     * The size of G2 is always zero because it cannot be determined until
++       the group becomes G1.
++     * Although this is of unsigned type, we rely on using unsigned overflow
++       rules to make this hold effectively negative values too (in
++       particular, when waiters in G2 cancel waiting).
++
++   A PTHREAD_COND_INITIALIZER condvar has all fields set to zero, which yields
++   a condvar that has G2 starting at position 0 and a G1 that is closed.
++
++   Because waiters do not claim ownership of a group right when obtaining a
++   position in __wseq but only reference count the group when using futexes
++   to block, it can happen that a group gets closed before a waiter can
++   increment the reference count.  Therefore, waiters have to check whether
++   their group is already closed using __g1_start.  They also have to perform
++   this check when spinning when trying to grab a signal from __g_signals.
++   Note that for these checks, using relaxed MO to load __g1_start is
++   sufficient because if a waiter can see a sufficiently large value, it could
++   have also consumed a signal in the waiter's group. 
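The __wrefs description above packs three flag bits below a reference count that moves in steps of 8 (see the fetch_add of 8 in __pthread_cond_wait_common and of -8 in __condvar_confirm_wakeup). A small illustrative decoder written against that documented layout; describe_wrefs is a made-up helper, not something this patch adds:

  #include <stdio.h>

  /* Decode a snapshot of __wrefs according to the layout documented above:
     bit 0 = process-shared, bit 1 = clock ID (1 == CLOCK_MONOTONIC),
     bit 2 = pthread_cond_destroy is waiting, remaining bits = waiter count.  */
  static void describe_wrefs (unsigned int wrefs)
  {
    printf ("waiters=%u process-shared=%u monotonic-clock=%u destroy-pending=%u\n",
            wrefs >> 3, wrefs & 1, (wrefs >> 1) & 1, (wrefs >> 2) & 1);
  }

For example, describe_wrefs (2 * 8 + 2) reports two waiters on a private condvar that uses CLOCK_MONOTONIC and has no pending destruction.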
++ ++ Waiters try to grab a signal from __g_signals without holding a reference ++ count, which can lead to stealing a signal from a more recent group after ++ their own group was already closed. They cannot always detect whether they ++ in fact did because they do not know when they stole, but they can ++ conservatively add a signal back to the group they stole from; if they ++ did so unnecessarily, all that happens is a spurious wake-up. To make this ++ even less likely, __g1_start contains the index of the current g2 too, ++ which allows waiters to check if there aliasing on the group slots; if ++ there wasn't, they didn't steal from the current G1, which means that the ++ G1 they stole from must have been already closed and they do not need to ++ fix anything. ++ ++ It is essential that the last field in pthread_cond_t is __g_signals[1]: ++ The previous condvar used a pointer-sized field in pthread_cond_t, so a ++ PTHREAD_COND_INITIALIZER from that condvar implementation might only ++ initialize 4 bytes to zero instead of the 8 bytes we need (i.e., 44 bytes ++ in total instead of the 48 we need). __g_signals[1] is not accessed before ++ the first group switch (G2 starts at index 0), which will set its value to ++ zero after a harmless fetch-or whose return value is ignored. This ++ effectively completes initialization. ++ ++ ++ Limitations: ++ * This condvar isn't designed to allow for more than ++ __PTHREAD_COND_MAX_GROUP_SIZE * (1 << 31) calls to __pthread_cond_wait. ++ * More than __PTHREAD_COND_MAX_GROUP_SIZE concurrent waiters are not ++ supported. ++ * Beyond what is allowed as errors by POSIX or documented, we can also ++ return the following errors: ++ * EPERM if MUTEX is a recursive mutex and the caller doesn't own it. ++ * EOWNERDEAD or ENOTRECOVERABLE when using robust mutexes. Unlike ++ for other errors, this can happen when we re-acquire the mutex; this ++ isn't allowed by POSIX (which requires all errors to virtually happen ++ before we release the mutex or change the condvar state), but there's ++ nothing we can do really. ++ * When using PTHREAD_MUTEX_PP_* mutexes, we can also return all errors ++ returned by __pthread_tpp_change_priority. We will already have ++ released the mutex in such cases, so the caller cannot expect to own ++ MUTEX. ++ ++ Other notes: ++ * Instead of the normal mutex unlock / lock functions, we use ++ __pthread_mutex_unlock_usercnt(m, 0) / __pthread_mutex_cond_lock(m) ++ because those will not change the mutex-internal users count, so that it ++ can be detected when a condvar is still associated with a particular ++ mutex because there is a waiter blocked on this condvar using this mutex. ++*/ ++static __always_inline int ++__pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, ++ const struct timespec *abstime) + { +- struct _pthread_cleanup_buffer buffer; +- struct _condvar_cleanup_buffer cbuffer; ++ const int maxspin = 0; + int err; +- int pshared = (cond->__data.__mutex == (void *) ~0l) +- ? LLL_SHARED : LLL_PRIVATE; +- +-#if (defined lll_futex_wait_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- int pi_flag = 0; +-#endif ++ int result = 0; + + LIBC_PROBE (cond_wait, 2, cond, mutex); + +- /* Make sure we are alone. */ +- lll_lock (cond->__data.__lock, pshared); +- +- /* Now we can release the mutex. */ ++ /* Acquire a position (SEQ) in the waiter sequence (WSEQ). We use an ++ atomic operation because signals and broadcasts may update the group ++ switch without acquiring the mutex. 
We do not need release MO here ++ because we do not need to establish any happens-before relation with ++ signalers (see __pthread_cond_signal); modification order alone ++ establishes a total order of waiters/signals. We do need acquire MO ++ to synchronize with group reinitialization in ++ __condvar_quiesce_and_switch_g1. */ ++ uint64_t wseq = __condvar_fetch_add_wseq_acquire (cond, 2); ++ /* Find our group's index. We always go into what was G2 when we acquired ++ our position. */ ++ unsigned int g = wseq & 1; ++ uint64_t seq = wseq >> 1; ++ ++ /* Increase the waiter reference count. Relaxed MO is sufficient because ++ we only need to synchronize when decrementing the reference count. */ ++ unsigned int flags = atomic_fetch_add_relaxed (&cond->__data.__wrefs, 8); ++ int private = __condvar_get_private (flags); ++ ++ /* Now that we are registered as a waiter, we can release the mutex. ++ Waiting on the condvar must be atomic with releasing the mutex, so if ++ the mutex is used to establish a happens-before relation with any ++ signaler, the waiter must be visible to the latter; thus, we release the ++ mutex after registering as waiter. ++ If releasing the mutex fails, we just cancel our registration as a ++ waiter and confirm that we have woken up. */ + err = __pthread_mutex_unlock_usercnt (mutex, 0); +- if (__glibc_unlikely (err)) ++ if (__glibc_unlikely (err != 0)) + { +- lll_unlock (cond->__data.__lock, pshared); ++ __condvar_cancel_waiting (cond, seq, g, private); ++ __condvar_confirm_wakeup (cond, private); + return err; + } + +- /* We have one new user of the condvar. */ +- ++cond->__data.__total_seq; +- ++cond->__data.__futex; +- cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT; +- +- /* Remember the mutex we are using here. If there is already a +- different address store this is a bad user bug. Do not store +- anything for pshared condvars. */ +- if (cond->__data.__mutex != (void *) ~0l) +- cond->__data.__mutex = mutex; +- +- /* Prepare structure passed to cancellation handler. */ +- cbuffer.cond = cond; +- cbuffer.mutex = mutex; +- +- /* Before we block we enable cancellation. Therefore we have to +- install a cancellation handler. */ +- __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer); +- +- /* The current values of the wakeup counter. The "woken" counter +- must exceed this value. */ +- unsigned long long int val; +- unsigned long long int seq; +- val = seq = cond->__data.__wakeup_seq; +- /* Remember the broadcast counter. */ +- cbuffer.bc_seq = cond->__data.__broadcast_seq; ++ /* Now wait until a signal is available in our group or it is closed. ++ Acquire MO so that if we observe a value of zero written after group ++ switching in __condvar_quiesce_and_switch_g1, we synchronize with that ++ store and will see the prior update of __g1_start done while switching ++ groups too. */ ++ unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g); + + do + { +- unsigned int futex_val = cond->__data.__futex; +- /* Prepare to wait. Release the condvar futex. */ +- lll_unlock (cond->__data.__lock, pshared); +- +- /* Enable asynchronous cancellation. Required by the standard. */ +- cbuffer.oldtype = __pthread_enable_asynccancel (); +- +-#if (defined lll_futex_wait_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- /* If pi_flag remained 1 then it means that we had the lock and the mutex +- but a spurious waker raced ahead of us. Give back the mutex before +- going into wait again. 
*/ +- if (pi_flag) ++ while (1) + { +- __pthread_mutex_cond_lock_adjust (mutex); +- __pthread_mutex_unlock_usercnt (mutex, 0); ++ /* Spin-wait first. ++ Note that spinning first without checking whether a timeout ++ passed might lead to what looks like a spurious wake-up even ++ though we should return ETIMEDOUT (e.g., if the caller provides ++ an absolute timeout that is clearly in the past). However, ++ (1) spurious wake-ups are allowed, (2) it seems unlikely that a ++ user will (ab)use pthread_cond_wait as a check for whether a ++ point in time is in the past, and (3) spinning first without ++ having to compare against the current time seems to be the right ++ choice from a performance perspective for most use cases. */ ++ unsigned int spin = maxspin; ++ while (signals == 0 && spin > 0) ++ { ++ /* Check that we are not spinning on a group that's already ++ closed. */ ++ if (seq < (__condvar_load_g1_start_relaxed (cond) >> 1)) ++ goto done; ++ ++ /* TODO Back off. */ ++ ++ /* Reload signals. See above for MO. */ ++ signals = atomic_load_acquire (cond->__data.__g_signals + g); ++ spin--; ++ } ++ ++ /* If our group will be closed as indicated by the flag on signals, ++ don't bother grabbing a signal. */ ++ if (signals & 1) ++ goto done; ++ ++ /* If there is an available signal, don't block. */ ++ if (signals != 0) ++ break; ++ ++ /* No signals available after spinning, so prepare to block. ++ We first acquire a group reference and use acquire MO for that so ++ that we synchronize with the dummy read-modify-write in ++ __condvar_quiesce_and_switch_g1 if we read from that. In turn, ++ in this case this will make us see the closed flag on __g_signals ++ that designates a concurrent attempt to reuse the group's slot. ++ We use acquire MO for the __g_signals check to make the ++ __g1_start check work (see spinning above). ++ Note that the group reference acquisition will not mask the ++ release MO when decrementing the reference count because we use ++ an atomic read-modify-write operation and thus extend the release ++ sequence. */ ++ atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2); ++ if (((atomic_load_acquire (cond->__data.__g_signals + g) & 1) != 0) ++ || (seq < (__condvar_load_g1_start_relaxed (cond) >> 1))) ++ { ++ /* Our group is closed. Wake up any signalers that might be ++ waiting. */ ++ __condvar_dec_grefs (cond, g, private); ++ goto done; ++ } ++ ++ // Now block. ++ struct _pthread_cleanup_buffer buffer; ++ struct _condvar_cleanup_buffer cbuffer; ++ cbuffer.wseq = wseq; ++ cbuffer.cond = cond; ++ cbuffer.mutex = mutex; ++ cbuffer.private = private; ++ __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer); ++ ++ if (abstime == NULL) ++ { ++ /* Block without a timeout. */ ++ err = futex_wait_cancelable ( ++ cond->__data.__g_signals + g, 0, private); ++ } ++ else ++ { ++ /* Block, but with a timeout. ++ Work around the fact that the kernel rejects negative timeout ++ values despite them being valid. */ ++ if (__glibc_unlikely (abstime->tv_sec < 0)) ++ err = ETIMEDOUT; ++ ++ else if ((flags & __PTHREAD_COND_CLOCK_MONOTONIC_MASK) != 0) ++ { ++ /* CLOCK_MONOTONIC is requested. */ ++ struct timespec rt; ++ if (__clock_gettime (CLOCK_MONOTONIC, &rt) != 0) ++ __libc_fatal ("clock_gettime does not support " ++ "CLOCK_MONOTONIC"); ++ /* Convert the absolute timeout value to a relative ++ timeout. 
*/ ++ rt.tv_sec = abstime->tv_sec - rt.tv_sec; ++ rt.tv_nsec = abstime->tv_nsec - rt.tv_nsec; ++ if (rt.tv_nsec < 0) ++ { ++ rt.tv_nsec += 1000000000; ++ --rt.tv_sec; ++ } ++ /* Did we already time out? */ ++ if (__glibc_unlikely (rt.tv_sec < 0)) ++ err = ETIMEDOUT; ++ else ++ err = futex_reltimed_wait_cancelable ++ (cond->__data.__g_signals + g, 0, &rt, private); ++ } ++ else ++ { ++ /* Use CLOCK_REALTIME. */ ++ err = futex_abstimed_wait_cancelable ++ (cond->__data.__g_signals + g, 0, abstime, private); ++ } ++ } ++ ++ __pthread_cleanup_pop (&buffer, 0); ++ ++ if (__glibc_unlikely (err == ETIMEDOUT)) ++ { ++ __condvar_dec_grefs (cond, g, private); ++ /* If we timed out, we effectively cancel waiting. Note that ++ we have decremented __g_refs before cancellation, so that a ++ deadlock between waiting for quiescence of our group in ++ __condvar_quiesce_and_switch_g1 and us trying to acquire ++ the lock during cancellation is not possible. */ ++ __condvar_cancel_waiting (cond, seq, g, private); ++ result = ETIMEDOUT; ++ goto done; ++ } ++ else ++ __condvar_dec_grefs (cond, g, private); ++ ++ /* Reload signals. See above for MO. */ ++ signals = atomic_load_acquire (cond->__data.__g_signals + g); + } +- pi_flag = USE_REQUEUE_PI (mutex); + +- if (pi_flag) ++ } ++ /* Try to grab a signal. Use acquire MO so that we see an up-to-date value ++ of __g1_start below (see spinning above for a similar case). In ++ particular, if we steal from a more recent group, we will also see a ++ more recent __g1_start below. */ ++ while (!atomic_compare_exchange_weak_acquire (cond->__data.__g_signals + g, ++ &signals, signals - 2)); ++ ++ /* We consumed a signal but we could have consumed from a more recent group ++ that aliased with ours due to being in the same group slot. If this ++ might be the case our group must be closed as visible through ++ __g1_start. */ ++ uint64_t g1_start = __condvar_load_g1_start_relaxed (cond); ++ if (seq < (g1_start >> 1)) ++ { ++ /* We potentially stole a signal from a more recent group but we do not ++ know which group we really consumed from. ++ We do not care about groups older than current G1 because they are ++ closed; we could have stolen from these, but then we just add a ++ spurious wake-up for the current groups. ++ We will never steal a signal from current G2 that was really intended ++ for G2 because G2 never receives signals (until it becomes G1). We ++ could have stolen a signal from G2 that was conservatively added by a ++ previous waiter that also thought it stole a signal -- but given that ++ that signal was added unnecessarily, it's not a problem if we steal ++ it. ++ Thus, the remaining case is that we could have stolen from the current ++ G1, where "current" means the __g1_start value we observed. However, ++ if the current G1 does not have the same slot index as we do, we did ++ not steal from it and do not need to undo that. This is the reason ++ for putting a bit with G2's index into__g1_start as well. */ ++ if (((g1_start & 1) ^ 1) == g) + { +- err = lll_futex_wait_requeue_pi (&cond->__data.__futex, +- futex_val, &mutex->__data.__lock, +- pshared); +- +- pi_flag = (err == 0); ++ /* We have to conservatively undo our potential mistake of stealing ++ a signal. We can stop trying to do that when the current G1 ++ changes because other spinning waiters will notice this too and ++ __condvar_quiesce_and_switch_g1 has checked that there are no ++ futex waiters anymore before switching G1. 
++ Relaxed MO is fine for the __g1_start load because we need to ++ merely be able to observe this fact and not have to observe ++ something else as well. ++ ??? Would it help to spin for a little while to see whether the ++ current G1 gets closed? This might be worthwhile if the group is ++ small or close to being closed. */ ++ unsigned int s = atomic_load_relaxed (cond->__data.__g_signals + g); ++ while (__condvar_load_g1_start_relaxed (cond) == g1_start) ++ { ++ /* Try to add a signal. We don't need to acquire the lock ++ because at worst we can cause a spurious wake-up. If the ++ group is in the process of being closed (LSB is true), this ++ has an effect similar to us adding a signal. */ ++ if (((s & 1) != 0) ++ || atomic_compare_exchange_weak_relaxed ++ (cond->__data.__g_signals + g, &s, s + 2)) ++ { ++ /* If we added a signal, we also need to add a wake-up on ++ the futex. We also need to do that if we skipped adding ++ a signal because the group is being closed because ++ while __condvar_quiesce_and_switch_g1 could have closed ++ the group, it might stil be waiting for futex waiters to ++ leave (and one of those waiters might be the one we stole ++ the signal from, which cause it to block using the ++ futex). */ ++ futex_wake (cond->__data.__g_signals + g, 1, private); ++ break; ++ } ++ /* TODO Back off. */ ++ } + } +- else +-#endif +- /* Wait until woken by signal or broadcast. */ +- lll_futex_wait (&cond->__data.__futex, futex_val, pshared); +- +- /* Disable asynchronous cancellation. */ +- __pthread_disable_asynccancel (cbuffer.oldtype); +- +- /* We are going to look at shared data again, so get the lock. */ +- lll_lock (cond->__data.__lock, pshared); +- +- /* If a broadcast happened, we are done. */ +- if (cbuffer.bc_seq != cond->__data.__broadcast_seq) +- goto bc_out; +- +- /* Check whether we are eligible for wakeup. */ +- val = cond->__data.__wakeup_seq; + } +- while (val == seq || cond->__data.__woken_seq == val); + +- /* Another thread woken up. */ +- ++cond->__data.__woken_seq; ++ done: + +- bc_out: ++ /* Confirm that we have been woken. We do that before acquiring the mutex ++ to allow for execution of pthread_cond_destroy while having acquired the ++ mutex. */ ++ __condvar_confirm_wakeup (cond, private); + +- cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT; +- +- /* If pthread_cond_destroy was called on this varaible already, +- notify the pthread_cond_destroy caller all waiters have left +- and it can be successfully destroyed. */ +- if (cond->__data.__total_seq == -1ULL +- && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT)) +- lll_futex_wake (&cond->__data.__nwaiters, 1, pshared); ++ /* Woken up; now re-acquire the mutex. If this doesn't fail, return RESULT, ++ which is set to ETIMEDOUT if a timeout occured, or zero otherwise. */ ++ err = __pthread_mutex_cond_lock (mutex); ++ /* XXX Abort on errors that are disallowed by POSIX? */ ++ return (err != 0) ? err : result; ++} + +- /* We are done with the condvar. */ +- lll_unlock (cond->__data.__lock, pshared); + +- /* The cancellation handling is back to normal, remove the handler. */ +- __pthread_cleanup_pop (&buffer, 0); ++/* See __pthread_cond_wait_common. */ ++int ++__pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) ++{ ++ return __pthread_cond_wait_common (cond, mutex, NULL); ++} + +- /* Get the mutex before returning. Not needed for PI. 
*/ +-#if (defined lll_futex_wait_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +- if (pi_flag) +- { +- __pthread_mutex_cond_lock_adjust (mutex); +- return 0; +- } +- else +-#endif +- return __pthread_mutex_cond_lock (mutex); ++/* See __pthread_cond_wait_common. */ ++int ++__pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, ++ const struct timespec *abstime) ++{ ++ /* Check parameter validity. This should also tell the compiler that ++ it can assume that abstime is not NULL. */ ++ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) ++ return EINVAL; ++ return __pthread_cond_wait_common (cond, mutex, abstime); + } + + versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, + GLIBC_2_3_2); ++versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, ++ GLIBC_2_3_2); +diff --git a/nptl/pthread_condattr_getclock.c b/nptl/pthread_condattr_getclock.c +index d156302..cecb4aa 100644 +--- a/nptl/pthread_condattr_getclock.c ++++ b/nptl/pthread_condattr_getclock.c +@@ -23,6 +23,6 @@ int + pthread_condattr_getclock (const pthread_condattr_t *attr, clockid_t *clock_id) + { + *clock_id = (((((const struct pthread_condattr *) attr)->value) >> 1) +- & ((1 << COND_NWAITERS_SHIFT) - 1)); ++ & ((1 << COND_CLOCK_BITS) - 1)); + return 0; + } +diff --git a/nptl/pthread_condattr_getpshared.c b/nptl/pthread_condattr_getpshared.c +index 5a10f3e..8147966 100644 +--- a/nptl/pthread_condattr_getpshared.c ++++ b/nptl/pthread_condattr_getpshared.c +@@ -22,7 +22,8 @@ + int + pthread_condattr_getpshared (const pthread_condattr_t *attr, int *pshared) + { +- *pshared = ((const struct pthread_condattr *) attr)->value & 1; ++ *pshared = (((const struct pthread_condattr *) attr)->value & 1 ++ ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE); + + return 0; + } +diff --git a/nptl/pthread_condattr_init.c b/nptl/pthread_condattr_init.c +index 0ce42e5..6e5168d 100644 +--- a/nptl/pthread_condattr_init.c ++++ b/nptl/pthread_condattr_init.c +@@ -23,7 +23,9 @@ + int + __pthread_condattr_init (pthread_condattr_t *attr) + { +- memset (attr, '\0', sizeof (*attr)); ++ struct pthread_condattr *iattr = (struct pthread_condattr *) attr; ++ /* Default is not pshared and CLOCK_REALTIME. */ ++ iattr-> value = CLOCK_REALTIME << 1; + + return 0; + } +diff --git a/nptl/pthread_condattr_setclock.c b/nptl/pthread_condattr_setclock.c +index 25e2a17..3cfad84 100644 +--- a/nptl/pthread_condattr_setclock.c ++++ b/nptl/pthread_condattr_setclock.c +@@ -18,7 +18,7 @@ + + #include <assert.h> + #include <errno.h> +-#include <stdbool.h> ++#include <futex-internal.h> + #include <time.h> + #include <sysdep.h> + #include "pthreadP.h" +@@ -33,12 +33,17 @@ pthread_condattr_setclock (pthread_condattr_t *attr, clockid_t clock_id) + in the pthread_cond_t structure needs to be adjusted. */ + return EINVAL; + ++ /* If we do not support waiting using CLOCK_MONOTONIC, return an error. */ ++ if (clock_id == CLOCK_MONOTONIC ++ && !futex_supports_exact_relative_timeouts()) ++ return ENOTSUP; ++ + /* Make sure the value fits in the bits we reserved. 
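pthread_condattr_setclock above now refuses CLOCK_MONOTONIC only when the futex implementation cannot honor exact relative timeouts; the chosen clock ends up as bit 1 of __wrefs and selects the CLOCK_MONOTONIC branch in __pthread_cond_wait_common. A hedged caller-side sketch of how this is typically used (hypothetical names, error handling trimmed):

  #include <errno.h>
  #include <pthread.h>
  #include <time.h>

  static pthread_mutex_t mlock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t mcond;
  static int done;

  /* Create a condvar whose timed waits are measured against CLOCK_MONOTONIC.  */
  int setup_monotonic_cond (void)
  {
    pthread_condattr_t attr;
    pthread_condattr_init (&attr);
    int err = pthread_condattr_setclock (&attr, CLOCK_MONOTONIC);  /* May be ENOTSUP.  */
    if (err == 0)
      err = pthread_cond_init (&mcond, &attr);
    pthread_condattr_destroy (&attr);
    return err;
  }

  /* Wait for `done`, giving up after `seconds`; the deadline must be taken
     from the same clock that was configured on the attribute.  */
  int wait_done (unsigned int seconds)
  {
    struct timespec abstime;
    clock_gettime (CLOCK_MONOTONIC, &abstime);
    abstime.tv_sec += seconds;

    int err = 0;
    pthread_mutex_lock (&mlock);
    while (!done && err == 0)
      err = pthread_cond_timedwait (&mcond, &mlock, &abstime);
    pthread_mutex_unlock (&mlock);
    return done ? 0 : err;   /* 0, ETIMEDOUT, or another error code.  */
  }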
*/ +- assert (clock_id < (1 << COND_NWAITERS_SHIFT)); ++ assert (clock_id < (1 << COND_CLOCK_BITS)); + + int *valuep = &((struct pthread_condattr *) attr)->value; + +- *valuep = ((*valuep & ~(((1 << COND_NWAITERS_SHIFT) - 1) << 1)) ++ *valuep = ((*valuep & ~(((1 << COND_CLOCK_BITS) - 1) << 1)) + | (clock_id << 1)); + + return 0; +diff --git a/nptl/test-cond-printers.py b/nptl/test-cond-printers.py +index af0e12e..9e807c9 100644 +--- a/nptl/test-cond-printers.py ++++ b/nptl/test-cond-printers.py +@@ -35,7 +35,7 @@ try: + + break_at(test_source, 'Test status (destroyed)') + continue_cmd() # Go to test_status_destroyed +- test_printer(var, to_string, {'Status': 'Destroyed'}) ++ test_printer(var, to_string, {'Threads known to still execute a wait function': '0'}) + + continue_cmd() # Exit + +diff --git a/nptl/tst-cond1.c b/nptl/tst-cond1.c +index 75ab9c8..509bbd0 100644 +--- a/nptl/tst-cond1.c ++++ b/nptl/tst-cond1.c +@@ -73,6 +73,9 @@ do_test (void) + + puts ("parent: wait for condition"); + ++ /* This test will fail on spurious wake-ups, which are allowed; however, ++ the current implementation shouldn't produce spurious wake-ups in the ++ scenario we are testing here. */ + err = pthread_cond_wait (&cond, &mut); + if (err != 0) + error (EXIT_FAILURE, err, "parent: cannot wait fir signal"); +diff --git a/nptl/tst-cond20.c b/nptl/tst-cond20.c +index 918c4ad..665a66a 100644 +--- a/nptl/tst-cond20.c ++++ b/nptl/tst-cond20.c +@@ -96,7 +96,10 @@ do_test (void) + + for (i = 0; i < ROUNDS; ++i) + { +- pthread_cond_wait (&cond2, &mut); ++ /* Make sure we discard spurious wake-ups. */ ++ do ++ pthread_cond_wait (&cond2, &mut); ++ while (count != N); + + if (i & 1) + pthread_mutex_unlock (&mut); +diff --git a/nptl/tst-cond22.c b/nptl/tst-cond22.c +index bd978e5..64f19ea 100644 +--- a/nptl/tst-cond22.c ++++ b/nptl/tst-cond22.c +@@ -106,10 +106,11 @@ do_test (void) + status = 1; + } + +- printf ("cond = { %d, %x, %lld, %lld, %lld, %p, %u, %u }\n", +- c.__data.__lock, c.__data.__futex, c.__data.__total_seq, +- c.__data.__wakeup_seq, c.__data.__woken_seq, c.__data.__mutex, +- c.__data.__nwaiters, c.__data.__broadcast_seq); ++ printf ("cond = { %llu, %llu, %u/%u/%u, %u/%u/%u, %u, %u }\n", ++ c.__data.__wseq, c.__data.__g1_start, ++ c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0], ++ c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1], ++ c.__data.__g1_orig_size, c.__data.__wrefs); + + if (pthread_create (&th, NULL, tf, (void *) 1l) != 0) + { +@@ -148,10 +149,11 @@ do_test (void) + status = 1; + } + +- printf ("cond = { %d, %x, %lld, %lld, %lld, %p, %u, %u }\n", +- c.__data.__lock, c.__data.__futex, c.__data.__total_seq, +- c.__data.__wakeup_seq, c.__data.__woken_seq, c.__data.__mutex, +- c.__data.__nwaiters, c.__data.__broadcast_seq); ++ printf ("cond = { %llu, %llu, %u/%u/%u, %u/%u/%u, %u, %u }\n", ++ c.__data.__wseq, c.__data.__g1_start, ++ c.__data.__g_signals[0], c.__data.__g_refs[0], c.__data.__g_size[0], ++ c.__data.__g_signals[1], c.__data.__g_refs[1], c.__data.__g_size[1], ++ c.__data.__g1_orig_size, c.__data.__wrefs); + + return status; + } +diff --git a/sysdeps/aarch64/nptl/bits/pthreadtypes.h b/sysdeps/aarch64/nptl/bits/pthreadtypes.h +index 13984a7..c6fa632 100644 +--- a/sysdeps/aarch64/nptl/bits/pthreadtypes.h ++++ b/sysdeps/aarch64/nptl/bits/pthreadtypes.h +@@ -90,17 +90,30 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- 
__extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; +- long int __align; ++ __extension__ long long int __align; + } pthread_cond_t; + + typedef union +diff --git a/sysdeps/arm/nptl/bits/pthreadtypes.h b/sysdeps/arm/nptl/bits/pthreadtypes.h +index afb5392..53518c6 100644 +--- a/sysdeps/arm/nptl/bits/pthreadtypes.h ++++ b/sysdeps/arm/nptl/bits/pthreadtypes.h +@@ -93,14 +93,27 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/ia64/nptl/bits/pthreadtypes.h b/sysdeps/ia64/nptl/bits/pthreadtypes.h +index f2e6dac..e72dbfd 100644 +--- a/sysdeps/ia64/nptl/bits/pthreadtypes.h ++++ b/sysdeps/ia64/nptl/bits/pthreadtypes.h +@@ -90,17 +90,30 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; +- long int __align; ++ __extension__ long long int __align; + } pthread_cond_t; + + typedef union +diff --git a/sysdeps/m68k/nptl/bits/pthreadtypes.h b/sysdeps/m68k/nptl/bits/pthreadtypes.h +index d8faa7a..c5e9021 100644 +--- a/sysdeps/m68k/nptl/bits/pthreadtypes.h ++++ b/sysdeps/m68k/nptl/bits/pthreadtypes.h +@@ -88,19 +88,33 @@ typedef union + + + /* Data structure for conditional variable handling. The structure of +- the attribute type is deliberately not exposed. */ ++ the attribute type is not exposed on purpose. 
*/ + typedef union + { + struct + { +- int __lock __attribute__ ((__aligned__ (4))); +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ /* Enforce proper alignment of fields used as futex words. */ ++ unsigned int __g_refs[2] __attribute__ ((__aligned__ (4))); ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/microblaze/nptl/bits/pthreadtypes.h b/sysdeps/microblaze/nptl/bits/pthreadtypes.h +index 9e9e307..b6623c2 100644 +--- a/sysdeps/microblaze/nptl/bits/pthreadtypes.h ++++ b/sysdeps/microblaze/nptl/bits/pthreadtypes.h +@@ -91,14 +91,27 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/mips/nptl/bits/pthreadtypes.h b/sysdeps/mips/nptl/bits/pthreadtypes.h +index 68ed94b..7ddc7bf 100644 +--- a/sysdeps/mips/nptl/bits/pthreadtypes.h ++++ b/sysdeps/mips/nptl/bits/pthreadtypes.h +@@ -117,19 +117,32 @@ typedef union + + + /* Data structure for conditional variable handling. The structure of +- the attribute type is deliberately not exposed. */ ++ the attribute type is not exposed on purpose. 
*/ + typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/nios2/nptl/bits/pthreadtypes.h b/sysdeps/nios2/nptl/bits/pthreadtypes.h +index 76076d0..3995e26 100644 +--- a/sysdeps/nios2/nptl/bits/pthreadtypes.h ++++ b/sysdeps/nios2/nptl/bits/pthreadtypes.h +@@ -88,19 +88,32 @@ typedef union + + + /* Data structure for conditional variable handling. The structure of +- the attribute type is deliberately not exposed. */ ++ the attribute type is not exposed on purpose. */ + typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/nptl/internaltypes.h b/sysdeps/nptl/internaltypes.h +index 203c548..31e5a43 100644 +--- a/sysdeps/nptl/internaltypes.h ++++ b/sysdeps/nptl/internaltypes.h +@@ -68,20 +68,13 @@ struct pthread_condattr + { + /* Combination of values: + +- Bit 0 : flag whether conditional variable will be sharable between +- processes. +- +- Bit 1-7: clock ID. */ ++ Bit 0 : flag whether conditional variable will be ++ sharable between processes. ++ Bit 1-COND_CLOCK_BITS: Clock ID. COND_CLOCK_BITS is the number of bits ++ needed to represent the ID of the clock. */ + int value; + }; +- +- +-/* The __NWAITERS field is used as a counter and to house the number +- of bits for other purposes. COND_CLOCK_BITS is the number +- of bits needed to represent the ID of the clock. COND_NWAITERS_SHIFT +- is the number of bits reserved for other purposes like the clock. */ +-#define COND_CLOCK_BITS 1 +-#define COND_NWAITERS_SHIFT 1 ++#define COND_CLOCK_BITS 1 + + + /* Read-write lock variable attribute data structure. */ +diff --git a/sysdeps/nptl/pthread.h b/sysdeps/nptl/pthread.h +index fd0894e..c122446 100644 +--- a/sysdeps/nptl/pthread.h ++++ b/sysdeps/nptl/pthread.h +@@ -183,7 +183,7 @@ enum + + + /* Conditional variable handling. 
*/ +-#define PTHREAD_COND_INITIALIZER { { 0, 0, 0, 0, 0, (void *) 0, 0, 0 } } ++#define PTHREAD_COND_INITIALIZER { { {0}, {0}, {0, 0}, {0, 0}, 0, 0, {0, 0} } } + + + /* Cleanup buffers */ +diff --git a/sysdeps/s390/nptl/bits/pthreadtypes.h b/sysdeps/s390/nptl/bits/pthreadtypes.h +index 40d10fe..4e455ab 100644 +--- a/sysdeps/s390/nptl/bits/pthreadtypes.h ++++ b/sysdeps/s390/nptl/bits/pthreadtypes.h +@@ -142,14 +142,27 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/sh/nptl/bits/pthreadtypes.h b/sysdeps/sh/nptl/bits/pthreadtypes.h +index 13fbd73..065dd11 100644 +--- a/sysdeps/sh/nptl/bits/pthreadtypes.h ++++ b/sysdeps/sh/nptl/bits/pthreadtypes.h +@@ -93,14 +93,27 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/tile/nptl/bits/pthreadtypes.h b/sysdeps/tile/nptl/bits/pthreadtypes.h +index 7d68650..c12737f 100644 +--- a/sysdeps/tile/nptl/bits/pthreadtypes.h ++++ b/sysdeps/tile/nptl/bits/pthreadtypes.h +@@ -122,14 +122,27 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h 
b/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h +index 1a1779b..d88b045 100644 +--- a/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h ++++ b/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h +@@ -84,19 +84,32 @@ typedef union + + + /* Data structure for conditional variable handling. The structure of +- the attribute type is deliberately not exposed. */ ++ the attribute type is not exposed on purpose. */ + typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/unix/sysv/linux/hppa/internaltypes.h b/sysdeps/unix/sysv/linux/hppa/internaltypes.h +index 651ce2e..d649657 100644 +--- a/sysdeps/unix/sysv/linux/hppa/internaltypes.h ++++ b/sysdeps/unix/sysv/linux/hppa/internaltypes.h +@@ -46,32 +46,38 @@ fails because __initializer is zero, and the structure will be used as + is correctly. */ + + #define cond_compat_clear(var) \ +-({ \ +- int tmp = 0; \ +- var->__data.__lock = 0; \ +- var->__data.__futex = 0; \ +- var->__data.__mutex = NULL; \ +- /* Clear __initializer last, to indicate initialization is done. */ \ +- __asm__ __volatile__ ("stw,ma %1,0(%0)" \ +- : : "r" (&var->__data.__initializer), "r" (tmp) : "memory"); \ ++({ \ ++ int tmp = 0; \ ++ var->__data.__wseq = 0; \ ++ var->__data.__signals_sent = 0; \ ++ var->__data.__confirmed = 0; \ ++ var->__data.__generation = 0; \ ++ var->__data.__mutex = NULL; \ ++ var->__data.__quiescence_waiters = 0; \ ++ var->__data.__clockid = 0; \ ++ /* Clear __initializer last, to indicate initialization is done. */ \ ++ /* This synchronizes-with the acquire load below. */ \ ++ atomic_store_release (&var->__data.__initializer, 0); \ + }) + + #define cond_compat_check_and_clear(var) \ + ({ \ +- int ret; \ +- volatile int *value = &var->__data.__initializer; \ +- if ((ret = atomic_compare_and_exchange_val_acq(value, 2, 1))) \ ++ int v; \ ++ int *value = &var->__data.__initializer; \ ++ /* This synchronizes-with the release store above. */ \ ++ while ((v = atomic_load_acquire (value)) != 0) \ + { \ +- if (ret == 1) \ ++ if (v == 1 \ ++ /* Relaxed MO is fine; it only matters who's first. */ \ ++ && atomic_compare_exchange_acquire_weak_relaxed (value, 1, 2)) \ + { \ +- /* Initialize structure. */ \ ++ /* We're first; initialize structure. */ \ + cond_compat_clear (var); \ ++ break; \ + } \ + else \ +- { \ +- /* Yield until structure is initialized. */ \ +- while (*value == 2) sched_yield (); \ +- } \ ++ /* Yield before we re-check initialization status. 
*/ \ ++ sched_yield (); \ + } \ + }) + +diff --git a/sysdeps/unix/sysv/linux/hppa/pthread_cond_timedwait.c b/sysdeps/unix/sysv/linux/hppa/pthread_cond_timedwait.c +deleted file mode 100644 +index ec6fd23..0000000 +--- a/sysdeps/unix/sysv/linux/hppa/pthread_cond_timedwait.c ++++ /dev/null +@@ -1,41 +0,0 @@ +-/* Copyright (C) 2009-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Carlos O'Donell <carlos@codesourcery.com>, 2009. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library. If not, see +- <http://www.gnu.org/licenses/>. */ +- +-#ifndef INCLUDED_SELF +-# define INCLUDED_SELF +-# include <pthread_cond_timedwait.c> +-#else +-# include <pthread.h> +-# include <pthreadP.h> +-# include <internaltypes.h> +-# include <shlib-compat.h> +-int +-__pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, +- const struct timespec *abstime) +-{ +- cond_compat_check_and_clear (cond); +- return __pthread_cond_timedwait_internal (cond, mutex, abstime); +-} +-versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, +- GLIBC_2_3_2); +-# undef versioned_symbol +-# define versioned_symbol(lib, local, symbol, version) +-# undef __pthread_cond_timedwait +-# define __pthread_cond_timedwait __pthread_cond_timedwait_internal +-# include_next <pthread_cond_timedwait.c> +-#endif +diff --git a/sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c b/sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c +index 8f02831..0611f7d 100644 +--- a/sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c ++++ b/sysdeps/unix/sysv/linux/hppa/pthread_cond_wait.c +@@ -32,9 +32,22 @@ __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) + } + versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, + GLIBC_2_3_2); ++int ++__pthread_cond_timedwait (cond, mutex, abstime) ++ pthread_cond_t *cond; ++ pthread_mutex_t *mutex; ++ const struct timespec *abstime; ++{ ++ cond_compat_check_and_clear (cond); ++ return __pthread_cond_timedwait_internal (cond, mutex, abstime); ++} ++versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, ++ GLIBC_2_3_2); + # undef versioned_symbol + # define versioned_symbol(lib, local, symbol, version) + # undef __pthread_cond_wait + # define __pthread_cond_wait __pthread_cond_wait_internal ++# undef __pthread_cond_timedwait ++# define __pthread_cond_timedwait __pthread_cond_timedwait_internal + # include_next <pthread_cond_wait.c> + #endif +diff --git a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S +deleted file mode 100644 +index f697e5b..0000000 +--- a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S ++++ /dev/null +@@ -1,20 +0,0 @@ +-/* Copyright (C) 2003-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2003. 
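For reference, the hppa cond_compat_check_and_clear/cond_compat_clear hunk above replaces the old cmpxchg-and-stw sequence with a first-caller-initializes handshake: one thread wins the 1 -> 2 transition, clears the old-layout fields, and publishes completion with a release store of 0, while everyone else yields until the acquire load observes 0. A minimal sketch of the same pattern written with standard C11 <stdatomic.h> operations rather than glibc's internal atomic_* macros (the function and parameter names here are illustrative, not from the patch):

#include <sched.h>
#include <stdatomic.h>

/* States mirroring the hunk above: 1 = needs initialization,
   2 = initialization in progress, 0 = initialized.  */
static void
compat_init_once (atomic_int *state, void (*do_init) (void))
{
  int v;
  /* The acquire load synchronizes-with the release store that publishes 0.  */
  while ((v = atomic_load_explicit (state, memory_order_acquire)) != 0)
    {
      int expected = 1;
      /* Relaxed failure ordering is fine; the CAS only decides who is first.  */
      if (v == 1
          && atomic_compare_exchange_weak_explicit (state, &expected, 2,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
        {
          do_init ();   /* clear the old fields, as cond_compat_clear does */
          atomic_store_explicit (state, 0, memory_order_release);
          break;
        }
      /* Another thread won the race; yield and re-check, as the hunk does.  */
      sched_yield ();
    }
}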
+- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. */ +- +-#define HAVE_CMOV 1 +-#include "../pthread_cond_timedwait.S" +diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S +deleted file mode 100644 +index 5996688..0000000 +--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S ++++ /dev/null +@@ -1,241 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. */ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <kernel-features.h> +-#include <pthread-pi-defines.h> +-#include <pthread-errnos.h> +-#include <stap-probe.h> +- +- .text +- +- /* int pthread_cond_broadcast (pthread_cond_t *cond) */ +- .globl __pthread_cond_broadcast +- .type __pthread_cond_broadcast, @function +- .align 16 +-__pthread_cond_broadcast: +- cfi_startproc +- pushl %ebx +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebx, 0) +- pushl %esi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%esi, 0) +- pushl %edi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%edi, 0) +- pushl %ebp +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebp, 0) +- cfi_remember_state +- +- movl 20(%esp), %ebx +- +- LIBC_PROBE (cond_broadcast, 1, %edx) +- +- /* Get internal lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-#else +- cmpxchgl %edx, cond_lock(%ebx) +-#endif +- jnz 1f +- +-2: addl $cond_futex, %ebx +- movl total_seq+4-cond_futex(%ebx), %eax +- movl total_seq-cond_futex(%ebx), %ebp +- cmpl wakeup_seq+4-cond_futex(%ebx), %eax +- ja 3f +- jb 4f +- cmpl wakeup_seq-cond_futex(%ebx), %ebp +- jna 4f +- +- /* Cause all currently waiting threads to recognize they are +- woken up. */ +-3: movl %ebp, wakeup_seq-cond_futex(%ebx) +- movl %eax, wakeup_seq-cond_futex+4(%ebx) +- movl %ebp, woken_seq-cond_futex(%ebx) +- movl %eax, woken_seq-cond_futex+4(%ebx) +- addl %ebp, %ebp +- addl $1, broadcast_seq-cond_futex(%ebx) +- movl %ebp, (%ebx) +- +- /* Get the address of the mutex used. */ +- movl dep_mutex-cond_futex(%ebx), %edi +- +- /* Unlock. 
*/ +- LOCK +- subl $1, cond_lock-cond_futex(%ebx) +- jne 7f +- +- /* Don't use requeue for pshared condvars. */ +-8: cmpl $-1, %edi +- je 9f +- +- /* Do not use requeue for pshared condvars. */ +- testl $PS_BIT, MUTEX_KIND(%edi) +- jne 9f +- +- /* Requeue to a non-robust PI mutex if the PI bit is set and +- the robust bit is not set. */ +- movl MUTEX_KIND(%edi), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- je 81f +- +- /* Wake up all threads. */ +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx +-#else +- movl %gs:PRIVATE_FUTEX, %ecx +- orl $FUTEX_CMP_REQUEUE, %ecx +-#endif +- movl $SYS_futex, %eax +- movl $0x7fffffff, %esi +- movl $1, %edx +- /* Get the address of the futex involved. */ +-# if MUTEX_FUTEX != 0 +- addl $MUTEX_FUTEX, %edi +-# endif +-/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for sysenter. +- ENTER_KERNEL */ +- int $0x80 +- +- /* For any kind of error, which mainly is EAGAIN, we try again +- with WAKE. The general test also covers running on old +- kernels. */ +- cmpl $0xfffff001, %eax +- jae 9f +- +-6: xorl %eax, %eax +- popl %ebp +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebp) +- popl %edi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%edi) +- popl %esi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%esi) +- popl %ebx +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebx) +- ret +- +- cfi_restore_state +- +-81: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx +- movl $SYS_futex, %eax +- movl $0x7fffffff, %esi +- movl $1, %edx +- /* Get the address of the futex involved. */ +-# if MUTEX_FUTEX != 0 +- addl $MUTEX_FUTEX, %edi +-# endif +- int $0x80 +- +- /* For any kind of error, which mainly is EAGAIN, we try again +- with WAKE. The general test also covers running on old +- kernels. */ +- cmpl $0xfffff001, %eax +- jb 6b +- jmp 9f +- +- /* Initial locking failed. */ +-1: +-#if cond_lock == 0 +- movl %ebx, %edx +-#else +- leal cond_lock(%ebx), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- jmp 2b +- +- .align 16 +- /* Unlock. */ +-4: LOCK +- subl $1, cond_lock-cond_futex(%ebx) +- je 6b +- +- /* Unlock in loop requires wakeup. */ +-5: leal cond_lock-cond_futex(%ebx), %eax +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_futex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- jmp 6b +- +- /* Unlock in loop requires wakeup. */ +-7: leal cond_lock-cond_futex(%ebx), %eax +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_futex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- jmp 8b +- +-9: /* The futex requeue functionality is not available. 
*/ +- movl $0x7fffffff, %edx +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_futex(%ebx) +- sete %cl +- subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE, %ecx +- movl $SYS_futex, %eax +- ENTER_KERNEL +- jmp 6b +- cfi_endproc +- .size __pthread_cond_broadcast, .-__pthread_cond_broadcast +-versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast, +- GLIBC_2_3_2) +diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S +deleted file mode 100644 +index 0038775..0000000 +--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S ++++ /dev/null +@@ -1,216 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. */ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <kernel-features.h> +-#include <pthread-pi-defines.h> +-#include <pthread-errnos.h> +-#include <stap-probe.h> +- +- .text +- +- /* int pthread_cond_signal (pthread_cond_t *cond) */ +- .globl __pthread_cond_signal +- .type __pthread_cond_signal, @function +- .align 16 +-__pthread_cond_signal: +- +- cfi_startproc +- pushl %ebx +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebx, 0) +- pushl %edi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%edi, 0) +- cfi_remember_state +- +- movl 12(%esp), %edi +- +- LIBC_PROBE (cond_signal, 1, %edi) +- +- /* Get internal lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%edi) +-#else +- cmpxchgl %edx, cond_lock(%edi) +-#endif +- jnz 1f +- +-2: leal cond_futex(%edi), %ebx +- movl total_seq+4(%edi), %eax +- movl total_seq(%edi), %ecx +- cmpl wakeup_seq+4(%edi), %eax +-#if cond_lock != 0 +- /* Must use leal to preserve the flags. */ +- leal cond_lock(%edi), %edi +-#endif +- ja 3f +- jb 4f +- cmpl wakeup_seq-cond_futex(%ebx), %ecx +- jbe 4f +- +- /* Bump the wakeup number. */ +-3: addl $1, wakeup_seq-cond_futex(%ebx) +- adcl $0, wakeup_seq-cond_futex+4(%ebx) +- addl $1, (%ebx) +- +- /* Wake up one thread. */ +- pushl %esi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%esi, 0) +- pushl %ebp +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebp, 0) +- +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_futex(%ebx) +- sete %cl +- je 8f +- +- movl dep_mutex-cond_futex(%ebx), %edx +- /* Requeue to a non-robust PI mutex if the PI bit is set and +- the robust bit is not set. 
*/ +- movl MUTEX_KIND(%edx), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- je 9f +- +-8: subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE_OP, %ecx +- movl $SYS_futex, %eax +- movl $1, %edx +- movl $1, %esi +- movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp +- /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for +- sysenter. +- ENTER_KERNEL */ +- int $0x80 +- popl %ebp +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebp) +- popl %esi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%esi) +- +- /* For any kind of error, we try again with WAKE. +- The general test also covers running on old kernels. */ +- cmpl $-4095, %eax +- jae 7f +- +-6: xorl %eax, %eax +- popl %edi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%edi) +- popl %ebx +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebx) +- ret +- +- cfi_restore_state +- +-9: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx +- movl $SYS_futex, %eax +- movl $1, %edx +- xorl %esi, %esi +- movl dep_mutex-cond_futex(%ebx), %edi +- movl (%ebx), %ebp +- /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for +- sysenter. +- ENTER_KERNEL */ +- int $0x80 +- popl %ebp +- popl %esi +- +- leal -cond_futex(%ebx), %edi +- +- /* For any kind of error, we try again with WAKE. +- The general test also covers running on old kernels. */ +- cmpl $-4095, %eax +- jb 4f +- +-7: +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- orl $FUTEX_WAKE, %ecx +- +- movl $SYS_futex, %eax +- /* %edx should be 1 already from $FUTEX_WAKE_OP syscall. +- movl $1, %edx */ +- ENTER_KERNEL +- +- /* Unlock. Note that at this point %edi always points to +- cond_lock. */ +-4: LOCK +- subl $1, (%edi) +- je 6b +- +- /* Unlock in loop requires wakeup. */ +-5: movl %edi, %eax +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_futex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- jmp 6b +- +- /* Initial locking failed. */ +-1: +-#if cond_lock == 0 +- movl %edi, %edx +-#else +- leal cond_lock(%edi), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%edi) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- jmp 2b +- +- cfi_endproc +- .size __pthread_cond_signal, .-__pthread_cond_signal +-versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, +- GLIBC_2_3_2) +diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S +deleted file mode 100644 +index 6256376..0000000 +--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S ++++ /dev/null +@@ -1,974 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. 
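The i386 pthread_cond_broadcast.S and pthread_cond_signal.S files deleted above implement the old sequence-counter algorithm directly in assembly. As a rough guide to what the broadcast path does once it holds the internal lock, the same logic is sketched below in C against the old field layout; lock acquisition, the PI requeue path and the futex syscall details are omitted, so this is illustrative only and not code from the patch.

/* Old condvar layout, as shown in the pthreadtypes.h hunks being removed.  */
struct old_cond_data
{
  int __lock;
  unsigned int __futex;
  unsigned long long __total_seq;
  unsigned long long __wakeup_seq;
  unsigned long long __woken_seq;
  void *__mutex;
  unsigned int __nwaiters;
  unsigned int __broadcast_seq;
};

static void
old_broadcast_locked (struct old_cond_data *c)
{
  /* Nothing to do unless some waiter has not been signalled yet.  */
  if (c->__total_seq > c->__wakeup_seq)
    {
      /* "Cause all currently waiting threads to recognize they are
         woken up" (comment in the removed assembly).  */
      c->__wakeup_seq = c->__total_seq;
      c->__woken_seq = c->__total_seq;
      c->__broadcast_seq += 1;
      /* addl %ebp,%ebp; movl %ebp,(%ebx): the futex word is set to twice
         the low word of __total_seq so blocked waiters see a new value.  */
      c->__futex = (unsigned int) (2 * c->__total_seq);
      /* ...after which the assembly FUTEX_CMP_REQUEUEs the waiters onto
         the mutex, falling back to FUTEX_WAKE on error.  */
    }
}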
+- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. */ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <pthread-errnos.h> +-#include <pthread-pi-defines.h> +-#include <kernel-features.h> +-#include <stap-probe.h> +- +- .text +- +-/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, +- const struct timespec *abstime) */ +- .globl __pthread_cond_timedwait +- .type __pthread_cond_timedwait, @function +- .align 16 +-__pthread_cond_timedwait: +-.LSTARTCODE: +- cfi_startproc +-#ifdef SHARED +- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, +- DW.ref.__gcc_personality_v0) +- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) +-#else +- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) +- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) +-#endif +- +- pushl %ebp +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebp, 0) +- pushl %edi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%edi, 0) +- pushl %esi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%esi, 0) +- pushl %ebx +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebx, 0) +- +- movl 20(%esp), %ebx +- movl 28(%esp), %ebp +- +- LIBC_PROBE (cond_timedwait, 3, %ebx, 24(%esp), %ebp) +- +- cmpl $1000000000, 4(%ebp) +- movl $EINVAL, %eax +- jae 18f +- +- /* Stack frame: +- +- esp + 32 +- +--------------------------+ +- esp + 24 | timeout value | +- +--------------------------+ +- esp + 20 | futex pointer | +- +--------------------------+ +- esp + 16 | pi-requeued flag | +- +--------------------------+ +- esp + 12 | old broadcast_seq value | +- +--------------------------+ +- esp + 4 | old wake_seq value | +- +--------------------------+ +- esp + 0 | old cancellation mode | +- +--------------------------+ +- */ +- +-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME +-# ifdef PIC +- LOAD_PIC_REG (cx) +- cmpl $0, __have_futex_clock_realtime@GOTOFF(%ecx) +-# else +- cmpl $0, __have_futex_clock_realtime +-# endif +- je .Lreltmo +-#endif +- +- /* Get internal lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-#else +- cmpxchgl %edx, cond_lock(%ebx) +-#endif +- jnz 1f +- +- /* Store the reference to the mutex. If there is already a +- different value in there this is a bad user bug. */ +-2: cmpl $-1, dep_mutex(%ebx) +- movl 24(%esp), %eax +- je 17f +- movl %eax, dep_mutex(%ebx) +- +- /* Unlock the mutex. */ +-17: xorl %edx, %edx +- call __pthread_mutex_unlock_usercnt +- +- testl %eax, %eax +- jne 16f +- +- addl $1, total_seq(%ebx) +- adcl $0, total_seq+4(%ebx) +- addl $1, cond_futex(%ebx) +- addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) +- +-#ifdef __ASSUME_FUTEX_CLOCK_REALTIME +-# define FRAME_SIZE 24 +-#else +-# define FRAME_SIZE 32 +-#endif +- subl $FRAME_SIZE, %esp +- cfi_adjust_cfa_offset(FRAME_SIZE) +- cfi_remember_state +- +- /* Get and store current wakeup_seq value. */ +- movl wakeup_seq(%ebx), %edi +- movl wakeup_seq+4(%ebx), %edx +- movl broadcast_seq(%ebx), %eax +- movl %edi, 4(%esp) +- movl %edx, 8(%esp) +- movl %eax, 12(%esp) +- +- /* Reset the pi-requeued flag. 
*/ +- movl $0, 16(%esp) +- +- cmpl $0, (%ebp) +- movl $-ETIMEDOUT, %esi +- js 6f +- +-8: movl cond_futex(%ebx), %edi +- movl %edi, 20(%esp) +- +- /* Unlock. */ +- LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- jne 3f +- +-.LcleanupSTART: +-4: call __pthread_enable_asynccancel +- movl %eax, (%esp) +- +- leal (%ebp), %esi +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- sete %cl +- je 40f +- +- movl dep_mutex(%ebx), %edi +- /* Requeue to a non-robust PI mutex if the PI bit is set and +- the robust bit is not set. */ +- movl MUTEX_KIND(%edi), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- jne 40f +- +- movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx +- /* The following only works like this because we only support +- two clocks, represented using a single bit. */ +- testl $1, cond_nwaiters(%ebx) +- /* XXX Need to implement using sete instead of a jump. */ +- jne 42f +- orl $FUTEX_CLOCK_REALTIME, %ecx +- +-42: movl 20(%esp), %edx +- addl $cond_futex, %ebx +-.Ladd_cond_futex_pi: +- movl $SYS_futex, %eax +- ENTER_KERNEL +- subl $cond_futex, %ebx +-.Lsub_cond_futex_pi: +- movl %eax, %esi +- /* Set the pi-requeued flag only if the kernel has returned 0. The +- kernel does not hold the mutex on ETIMEDOUT or any other error. */ +- cmpl $0, %eax +- sete 16(%esp) +- je 41f +- +- /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns +- successfully, it has already locked the mutex for us and the +- pi_flag (16(%esp)) is set to denote that fact. However, if another +- thread changed the futex value before we entered the wait, the +- syscall may return an EAGAIN and the mutex is not locked. We go +- ahead with a success anyway since later we look at the pi_flag to +- decide if we got the mutex or not. The sequence numbers then make +- sure that only one of the threads actually wake up. We retry using +- normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal +- and PI futexes don't mix. +- +- Note that we don't check for EAGAIN specifically; we assume that the +- only other error the futex function could return is EAGAIN (barring +- the ETIMEOUT of course, for the timeout case in futex) since +- anything else would mean an error in our function. It is too +- expensive to do that check for every call (which is quite common in +- case of a large number of threads), so it has been skipped. */ +- cmpl $-ENOSYS, %eax +- jne 41f +- xorl %ecx, %ecx +- +-40: subl $1, %ecx +- movl $0, 16(%esp) +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAIT_BITSET, %ecx +- /* The following only works like this because we only support +- two clocks, represented using a single bit. */ +- testl $1, cond_nwaiters(%ebx) +- jne 30f +- orl $FUTEX_CLOCK_REALTIME, %ecx +-30: +- movl 20(%esp), %edx +- movl $0xffffffff, %ebp +- addl $cond_futex, %ebx +-.Ladd_cond_futex: +- movl $SYS_futex, %eax +- ENTER_KERNEL +- subl $cond_futex, %ebx +-.Lsub_cond_futex: +- movl 28+FRAME_SIZE(%esp), %ebp +- movl %eax, %esi +- +-41: movl (%esp), %eax +- call __pthread_disable_asynccancel +-.LcleanupEND: +- +- /* Lock. 
*/ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-#else +- cmpxchgl %edx, cond_lock(%ebx) +-#endif +- jnz 5f +- +-6: movl broadcast_seq(%ebx), %eax +- cmpl 12(%esp), %eax +- jne 23f +- +- movl woken_seq(%ebx), %eax +- movl woken_seq+4(%ebx), %ecx +- +- movl wakeup_seq(%ebx), %edi +- movl wakeup_seq+4(%ebx), %edx +- +- cmpl 8(%esp), %edx +- jne 7f +- cmpl 4(%esp), %edi +- je 15f +- +-7: cmpl %ecx, %edx +- jne 9f +- cmp %eax, %edi +- jne 9f +- +-15: cmpl $-ETIMEDOUT, %esi +- je 28f +- +- /* We need to go back to futex_wait. If we're using requeue_pi, then +- release the mutex we had acquired and go back. */ +- movl 16(%esp), %edx +- test %edx, %edx +- jz 8b +- +- /* Adjust the mutex values first and then unlock it. The unlock +- should always succeed or else the kernel did not lock the mutex +- correctly. */ +- movl dep_mutex(%ebx), %eax +- call __pthread_mutex_cond_lock_adjust +- movl dep_mutex(%ebx), %eax +- xorl %edx, %edx +- call __pthread_mutex_unlock_usercnt +- jmp 8b +- +-28: addl $1, wakeup_seq(%ebx) +- adcl $0, wakeup_seq+4(%ebx) +- addl $1, cond_futex(%ebx) +- movl $ETIMEDOUT, %esi +- jmp 14f +- +-23: xorl %esi, %esi +- jmp 24f +- +-9: xorl %esi, %esi +-14: addl $1, woken_seq(%ebx) +- adcl $0, woken_seq+4(%ebx) +- +-24: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) +- +- /* Wake up a thread which wants to destroy the condvar object. */ +- movl total_seq(%ebx), %eax +- andl total_seq+4(%ebx), %eax +- cmpl $0xffffffff, %eax +- jne 25f +- movl cond_nwaiters(%ebx), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 25f +- +- addl $cond_nwaiters, %ebx +- movl $SYS_futex, %eax +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_nwaiters(%ebx) +- sete %cl +- subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE, %ecx +- movl $1, %edx +- ENTER_KERNEL +- subl $cond_nwaiters, %ebx +- +-25: LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- jne 10f +- +-11: movl 24+FRAME_SIZE(%esp), %eax +- /* With requeue_pi, the mutex lock is held in the kernel. */ +- movl 16(%esp), %ecx +- testl %ecx, %ecx +- jnz 27f +- +- call __pthread_mutex_cond_lock +-26: addl $FRAME_SIZE, %esp +- cfi_adjust_cfa_offset(-FRAME_SIZE) +- +- /* We return the result of the mutex_lock operation if it failed. */ +- testl %eax, %eax +-#ifdef HAVE_CMOV +- cmovel %esi, %eax +-#else +- jne 22f +- movl %esi, %eax +-22: +-#endif +- +-18: popl %ebx +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebx) +- popl %esi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%esi) +- popl %edi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%edi) +- popl %ebp +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebp) +- +- ret +- +- cfi_restore_state +- +-27: call __pthread_mutex_cond_lock_adjust +- xorl %eax, %eax +- jmp 26b +- +- cfi_adjust_cfa_offset(-FRAME_SIZE); +- /* Initial locking failed. */ +-1: +-#if cond_lock == 0 +- movl %ebx, %edx +-#else +- leal cond_lock(%ebx), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- jmp 2b +- +- /* The initial unlocking of the mutex failed. 
*/ +-16: +- LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- jne 18b +- +- movl %eax, %esi +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- +- movl %esi, %eax +- jmp 18b +- +- cfi_adjust_cfa_offset(FRAME_SIZE) +- +- /* Unlock in loop requires wakeup. */ +-3: +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- jmp 4b +- +- /* Locking in loop failed. */ +-5: +-#if cond_lock == 0 +- movl %ebx, %edx +-#else +- leal cond_lock(%ebx), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- jmp 6b +- +- /* Unlock after loop requires wakeup. */ +-10: +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- jmp 11b +- +-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME +- cfi_adjust_cfa_offset(-FRAME_SIZE) +-.Lreltmo: +- /* Get internal lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-# if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-# else +- cmpxchgl %edx, cond_lock(%ebx) +-# endif +- jnz 101f +- +- /* Store the reference to the mutex. If there is already a +- different value in there this is a bad user bug. */ +-102: cmpl $-1, dep_mutex(%ebx) +- movl 24(%esp), %eax +- je 117f +- movl %eax, dep_mutex(%ebx) +- +- /* Unlock the mutex. */ +-117: xorl %edx, %edx +- call __pthread_mutex_unlock_usercnt +- +- testl %eax, %eax +- jne 16b +- +- addl $1, total_seq(%ebx) +- adcl $0, total_seq+4(%ebx) +- addl $1, cond_futex(%ebx) +- addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) +- +- subl $FRAME_SIZE, %esp +- cfi_adjust_cfa_offset(FRAME_SIZE) +- +- /* Get and store current wakeup_seq value. */ +- movl wakeup_seq(%ebx), %edi +- movl wakeup_seq+4(%ebx), %edx +- movl broadcast_seq(%ebx), %eax +- movl %edi, 4(%esp) +- movl %edx, 8(%esp) +- movl %eax, 12(%esp) +- +- /* Reset the pi-requeued flag. */ +- movl $0, 16(%esp) +- +- /* Get the current time. */ +-108: movl %ebx, %edx +-# ifdef __NR_clock_gettime +- /* Get the clock number. */ +- movl cond_nwaiters(%ebx), %ebx +- andl $((1 << nwaiters_shift) - 1), %ebx +- /* Only clocks 0 and 1 are allowed so far. Both are handled in the +- kernel. */ +- leal 24(%esp), %ecx +- movl $__NR_clock_gettime, %eax +- ENTER_KERNEL +- movl %edx, %ebx +- +- /* Compute relative timeout. */ +- movl (%ebp), %ecx +- movl 4(%ebp), %edx +- subl 24(%esp), %ecx +- subl 28(%esp), %edx +-# else +- /* Get the current time. */ +- leal 24(%esp), %ebx +- xorl %ecx, %ecx +- movl $__NR_gettimeofday, %eax +- ENTER_KERNEL +- movl %edx, %ebx +- +- /* Compute relative timeout. 
*/ +- movl 28(%esp), %eax +- movl $1000, %edx +- mul %edx /* Milli seconds to nano seconds. */ +- movl (%ebp), %ecx +- movl 4(%ebp), %edx +- subl 24(%esp), %ecx +- subl %eax, %edx +-# endif +- jns 112f +- addl $1000000000, %edx +- subl $1, %ecx +-112: testl %ecx, %ecx +- movl $-ETIMEDOUT, %esi +- js 106f +- +- /* Store relative timeout. */ +-121: movl %ecx, 24(%esp) +- movl %edx, 28(%esp) +- +- movl cond_futex(%ebx), %edi +- movl %edi, 20(%esp) +- +- /* Unlock. */ +- LOCK +-# if cond_lock == 0 +- subl $1, (%ebx) +-# else +- subl $1, cond_lock(%ebx) +-# endif +- jne 103f +- +-.LcleanupSTART2: +-104: call __pthread_enable_asynccancel +- movl %eax, (%esp) +- +- leal 24(%esp), %esi +-# if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-# endif +- cmpl $-1, dep_mutex(%ebx) +- sete %cl +- subl $1, %ecx +-# ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-# else +- andl %gs:PRIVATE_FUTEX, %ecx +-# endif +-# if FUTEX_WAIT != 0 +- addl $FUTEX_WAIT, %ecx +-# endif +- movl 20(%esp), %edx +- addl $cond_futex, %ebx +-.Ladd_cond_futex2: +- movl $SYS_futex, %eax +- ENTER_KERNEL +- subl $cond_futex, %ebx +-.Lsub_cond_futex2: +- movl %eax, %esi +- +-141: movl (%esp), %eax +- call __pthread_disable_asynccancel +-.LcleanupEND2: +- +- +- /* Lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-# if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-# else +- cmpxchgl %edx, cond_lock(%ebx) +-# endif +- jnz 105f +- +-106: movl broadcast_seq(%ebx), %eax +- cmpl 12(%esp), %eax +- jne 23b +- +- movl woken_seq(%ebx), %eax +- movl woken_seq+4(%ebx), %ecx +- +- movl wakeup_seq(%ebx), %edi +- movl wakeup_seq+4(%ebx), %edx +- +- cmpl 8(%esp), %edx +- jne 107f +- cmpl 4(%esp), %edi +- je 115f +- +-107: cmpl %ecx, %edx +- jne 9b +- cmp %eax, %edi +- jne 9b +- +-115: cmpl $-ETIMEDOUT, %esi +- je 28b +- +- jmp 8b +- +- cfi_adjust_cfa_offset(-FRAME_SIZE) +- /* Initial locking failed. */ +-101: +-# if cond_lock == 0 +- movl %ebx, %edx +-# else +- leal cond_lock(%ebx), %edx +-# endif +-# if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-# endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-# if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-# endif +- call __lll_lock_wait +- jmp 102b +- +- cfi_adjust_cfa_offset(FRAME_SIZE) +- +- /* Unlock in loop requires wakeup. */ +-103: +-# if cond_lock == 0 +- movl %ebx, %eax +-# else +- leal cond_lock(%ebx), %eax +-# endif +-# if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-# endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-# if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-# endif +- call __lll_unlock_wake +- jmp 104b +- +- /* Locking in loop failed. */ +-105: +-# if cond_lock == 0 +- movl %ebx, %edx +-# else +- leal cond_lock(%ebx), %edx +-# endif +-# if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-# endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-# if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-# endif +- call __lll_lock_wait +- jmp 106b +-#endif +- +- .size __pthread_cond_timedwait, .-__pthread_cond_timedwait +-versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, +- GLIBC_2_3_2) +- +- +- .type __condvar_tw_cleanup2, @function +-__condvar_tw_cleanup2: +- subl $cond_futex, %ebx +- .size __condvar_tw_cleanup2, .-__condvar_tw_cleanup2 +- .type __condvar_tw_cleanup, @function +-__condvar_tw_cleanup: +- movl %eax, %esi +- +- /* Get internal lock. 
*/ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-#else +- cmpxchgl %edx, cond_lock(%ebx) +-#endif +- jz 1f +- +-#if cond_lock == 0 +- movl %ebx, %edx +-#else +- leal cond_lock(%ebx), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- +-1: movl broadcast_seq(%ebx), %eax +- cmpl 12(%esp), %eax +- jne 3f +- +- /* We increment the wakeup_seq counter only if it is lower than +- total_seq. If this is not the case the thread was woken and +- then canceled. In this case we ignore the signal. */ +- movl total_seq(%ebx), %eax +- movl total_seq+4(%ebx), %edi +- cmpl wakeup_seq+4(%ebx), %edi +- jb 6f +- ja 7f +- cmpl wakeup_seq(%ebx), %eax +- jbe 7f +- +-6: addl $1, wakeup_seq(%ebx) +- adcl $0, wakeup_seq+4(%ebx) +- addl $1, cond_futex(%ebx) +- +-7: addl $1, woken_seq(%ebx) +- adcl $0, woken_seq+4(%ebx) +- +-3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) +- +- /* Wake up a thread which wants to destroy the condvar object. */ +- xorl %edi, %edi +- movl total_seq(%ebx), %eax +- andl total_seq+4(%ebx), %eax +- cmpl $0xffffffff, %eax +- jne 4f +- movl cond_nwaiters(%ebx), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 4f +- +- addl $cond_nwaiters, %ebx +- movl $SYS_futex, %eax +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_nwaiters(%ebx) +- sete %cl +- subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE, %ecx +- movl $1, %edx +- ENTER_KERNEL +- subl $cond_nwaiters, %ebx +- movl $1, %edi +- +-4: LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- je 2f +- +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- +- /* Wake up all waiters to make sure no signal gets lost. */ +-2: testl %edi, %edi +- jnz 5f +- addl $cond_futex, %ebx +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_futex(%ebx) +- sete %cl +- subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE, %ecx +- movl $SYS_futex, %eax +- movl $0x7fffffff, %edx +- ENTER_KERNEL +- +- /* Lock the mutex only if we don't own it already. This only happens +- in case of PI mutexes, if we got cancelled after a successful +- return of the futex syscall and before disabling async +- cancellation. */ +-5: movl 24+FRAME_SIZE(%esp), %eax +- movl MUTEX_KIND(%eax), %ebx +- andl $(ROBUST_BIT|PI_BIT), %ebx +- cmpl $PI_BIT, %ebx +- jne 8f +- +- movl (%eax), %ebx +- andl $TID_MASK, %ebx +- cmpl %ebx, %gs:TID +- jne 8f +- /* We managed to get the lock. Fix it up before returning. 
*/ +- call __pthread_mutex_cond_lock_adjust +- jmp 9f +- +-8: call __pthread_mutex_cond_lock +- +-9: movl %esi, (%esp) +-.LcallUR: +- call _Unwind_Resume +- hlt +-.LENDCODE: +- cfi_endproc +- .size __condvar_tw_cleanup, .-__condvar_tw_cleanup +- +- +- .section .gcc_except_table,"a",@progbits +-.LexceptSTART: +- .byte DW_EH_PE_omit # @LPStart format (omit) +- .byte DW_EH_PE_omit # @TType format (omit) +- .byte DW_EH_PE_sdata4 # call-site format +- # DW_EH_PE_sdata4 +- .uleb128 .Lcstend-.Lcstbegin +-.Lcstbegin: +- .long .LcleanupSTART-.LSTARTCODE +- .long .Ladd_cond_futex_pi-.LcleanupSTART +- .long __condvar_tw_cleanup-.LSTARTCODE +- .uleb128 0 +- .long .Ladd_cond_futex_pi-.LSTARTCODE +- .long .Lsub_cond_futex_pi-.Ladd_cond_futex_pi +- .long __condvar_tw_cleanup2-.LSTARTCODE +- .uleb128 0 +- .long .Lsub_cond_futex_pi-.LSTARTCODE +- .long .Ladd_cond_futex-.Lsub_cond_futex_pi +- .long __condvar_tw_cleanup-.LSTARTCODE +- .uleb128 0 +- .long .Ladd_cond_futex-.LSTARTCODE +- .long .Lsub_cond_futex-.Ladd_cond_futex +- .long __condvar_tw_cleanup2-.LSTARTCODE +- .uleb128 0 +- .long .Lsub_cond_futex-.LSTARTCODE +- .long .LcleanupEND-.Lsub_cond_futex +- .long __condvar_tw_cleanup-.LSTARTCODE +- .uleb128 0 +-#ifndef __ASSUME_FUTEX_CLOCK_REALTIME +- .long .LcleanupSTART2-.LSTARTCODE +- .long .Ladd_cond_futex2-.LcleanupSTART2 +- .long __condvar_tw_cleanup-.LSTARTCODE +- .uleb128 0 +- .long .Ladd_cond_futex2-.LSTARTCODE +- .long .Lsub_cond_futex2-.Ladd_cond_futex2 +- .long __condvar_tw_cleanup2-.LSTARTCODE +- .uleb128 0 +- .long .Lsub_cond_futex2-.LSTARTCODE +- .long .LcleanupEND2-.Lsub_cond_futex2 +- .long __condvar_tw_cleanup-.LSTARTCODE +- .uleb128 0 +-#endif +- .long .LcallUR-.LSTARTCODE +- .long .LENDCODE-.LcallUR +- .long 0 +- .uleb128 0 +-.Lcstend: +- +- +-#ifdef SHARED +- .hidden DW.ref.__gcc_personality_v0 +- .weak DW.ref.__gcc_personality_v0 +- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits +- .align 4 +- .type DW.ref.__gcc_personality_v0, @object +- .size DW.ref.__gcc_personality_v0, 4 +-DW.ref.__gcc_personality_v0: +- .long __gcc_personality_v0 +-#endif +diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S +deleted file mode 100644 +index 5016718..0000000 +--- a/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S ++++ /dev/null +@@ -1,642 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. 
*/ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <tcb-offsets.h> +-#include <pthread-errnos.h> +-#include <pthread-pi-defines.h> +-#include <kernel-features.h> +-#include <stap-probe.h> +- +- +- .text +- +-/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */ +- .globl __pthread_cond_wait +- .type __pthread_cond_wait, @function +- .align 16 +-__pthread_cond_wait: +-.LSTARTCODE: +- cfi_startproc +-#ifdef SHARED +- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, +- DW.ref.__gcc_personality_v0) +- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) +-#else +- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) +- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) +-#endif +- +- pushl %ebp +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebp, 0) +- pushl %edi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%edi, 0) +- pushl %esi +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%esi, 0) +- pushl %ebx +- cfi_adjust_cfa_offset(4) +- cfi_rel_offset(%ebx, 0) +- +- xorl %esi, %esi +- movl 20(%esp), %ebx +- +- LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx) +- +- /* Get internal lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-#else +- cmpxchgl %edx, cond_lock(%ebx) +-#endif +- jnz 1f +- +- /* Store the reference to the mutex. If there is already a +- different value in there this is a bad user bug. */ +-2: cmpl $-1, dep_mutex(%ebx) +- movl 24(%esp), %eax +- je 15f +- movl %eax, dep_mutex(%ebx) +- +- /* Unlock the mutex. */ +-15: xorl %edx, %edx +- call __pthread_mutex_unlock_usercnt +- +- testl %eax, %eax +- jne 12f +- +- addl $1, total_seq(%ebx) +- adcl $0, total_seq+4(%ebx) +- addl $1, cond_futex(%ebx) +- addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) +- +-#define FRAME_SIZE 20 +- subl $FRAME_SIZE, %esp +- cfi_adjust_cfa_offset(FRAME_SIZE) +- cfi_remember_state +- +- /* Get and store current wakeup_seq value. */ +- movl wakeup_seq(%ebx), %edi +- movl wakeup_seq+4(%ebx), %edx +- movl broadcast_seq(%ebx), %eax +- movl %edi, 4(%esp) +- movl %edx, 8(%esp) +- movl %eax, 12(%esp) +- +- /* Reset the pi-requeued flag. */ +-8: movl $0, 16(%esp) +- movl cond_futex(%ebx), %ebp +- +- /* Unlock. */ +- LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- jne 3f +- +-.LcleanupSTART: +-4: call __pthread_enable_asynccancel +- movl %eax, (%esp) +- +- xorl %ecx, %ecx +- cmpl $-1, dep_mutex(%ebx) +- sete %cl +- je 18f +- +- movl dep_mutex(%ebx), %edi +- /* Requeue to a non-robust PI mutex if the PI bit is set and +- the robust bit is not set. */ +- movl MUTEX_KIND(%edi), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- jne 18f +- +- movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx +- movl %ebp, %edx +- xorl %esi, %esi +- addl $cond_futex, %ebx +-.Ladd_cond_futex_pi: +- movl $SYS_futex, %eax +- ENTER_KERNEL +- subl $cond_futex, %ebx +-.Lsub_cond_futex_pi: +- /* Set the pi-requeued flag only if the kernel has returned 0. The +- kernel does not hold the mutex on error. */ +- cmpl $0, %eax +- sete 16(%esp) +- je 19f +- +- /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns +- successfully, it has already locked the mutex for us and the +- pi_flag (16(%esp)) is set to denote that fact. However, if another +- thread changed the futex value before we entered the wait, the +- syscall may return an EAGAIN and the mutex is not locked. 
We go +- ahead with a success anyway since later we look at the pi_flag to +- decide if we got the mutex or not. The sequence numbers then make +- sure that only one of the threads actually wake up. We retry using +- normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal +- and PI futexes don't mix. +- +- Note that we don't check for EAGAIN specifically; we assume that the +- only other error the futex function could return is EAGAIN since +- anything else would mean an error in our function. It is too +- expensive to do that check for every call (which is quite common in +- case of a large number of threads), so it has been skipped. */ +- cmpl $-ENOSYS, %eax +- jne 19f +- xorl %ecx, %ecx +- +-18: subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +-#if FUTEX_WAIT != 0 +- addl $FUTEX_WAIT, %ecx +-#endif +- movl %ebp, %edx +- addl $cond_futex, %ebx +-.Ladd_cond_futex: +- movl $SYS_futex, %eax +- ENTER_KERNEL +- subl $cond_futex, %ebx +-.Lsub_cond_futex: +- +-19: movl (%esp), %eax +- call __pthread_disable_asynccancel +-.LcleanupEND: +- +- /* Lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-#else +- cmpxchgl %edx, cond_lock(%ebx) +-#endif +- jnz 5f +- +-6: movl broadcast_seq(%ebx), %eax +- cmpl 12(%esp), %eax +- jne 16f +- +- movl woken_seq(%ebx), %eax +- movl woken_seq+4(%ebx), %ecx +- +- movl wakeup_seq(%ebx), %edi +- movl wakeup_seq+4(%ebx), %edx +- +- cmpl 8(%esp), %edx +- jne 7f +- cmpl 4(%esp), %edi +- je 22f +- +-7: cmpl %ecx, %edx +- jne 9f +- cmp %eax, %edi +- je 22f +- +-9: addl $1, woken_seq(%ebx) +- adcl $0, woken_seq+4(%ebx) +- +- /* Unlock */ +-16: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) +- +- /* Wake up a thread which wants to destroy the condvar object. */ +- movl total_seq(%ebx), %eax +- andl total_seq+4(%ebx), %eax +- cmpl $0xffffffff, %eax +- jne 17f +- movl cond_nwaiters(%ebx), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 17f +- +- addl $cond_nwaiters, %ebx +- movl $SYS_futex, %eax +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_nwaiters(%ebx) +- sete %cl +- subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE, %ecx +- movl $1, %edx +- ENTER_KERNEL +- subl $cond_nwaiters, %ebx +- +-17: LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- jne 10f +- +- /* With requeue_pi, the mutex lock is held in the kernel. */ +-11: movl 24+FRAME_SIZE(%esp), %eax +- movl 16(%esp), %ecx +- testl %ecx, %ecx +- jnz 21f +- +- call __pthread_mutex_cond_lock +-20: addl $FRAME_SIZE, %esp +- cfi_adjust_cfa_offset(-FRAME_SIZE); +- +-14: popl %ebx +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebx) +- popl %esi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%esi) +- popl %edi +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%edi) +- popl %ebp +- cfi_adjust_cfa_offset(-4) +- cfi_restore(%ebp) +- +- /* We return the result of the mutex_lock operation. */ +- ret +- +- cfi_restore_state +- +-21: call __pthread_mutex_cond_lock_adjust +- xorl %eax, %eax +- jmp 20b +- +- cfi_adjust_cfa_offset(-FRAME_SIZE); +- +- /* We need to go back to futex_wait. If we're using requeue_pi, then +- release the mutex we had acquired and go back. */ +-22: movl 16(%esp), %edx +- test %edx, %edx +- jz 8b +- +- /* Adjust the mutex values first and then unlock it. 
The unlock +- should always succeed or else the kernel did not lock the mutex +- correctly. */ +- movl dep_mutex(%ebx), %eax +- call __pthread_mutex_cond_lock_adjust +- movl dep_mutex(%ebx), %eax +- xorl %edx, %edx +- call __pthread_mutex_unlock_usercnt +- jmp 8b +- +- /* Initial locking failed. */ +-1: +-#if cond_lock == 0 +- movl %ebx, %edx +-#else +- leal cond_lock(%ebx), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- jmp 2b +- +- /* The initial unlocking of the mutex failed. */ +-12: +- LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- jne 14b +- +- movl %eax, %esi +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- +- movl %esi, %eax +- jmp 14b +- +- cfi_adjust_cfa_offset(FRAME_SIZE) +- +- /* Unlock in loop requires wakeup. */ +-3: +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- jmp 4b +- +- /* Locking in loop failed. */ +-5: +-#if cond_lock == 0 +- movl %ebx, %edx +-#else +- leal cond_lock(%ebx), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- jmp 6b +- +- /* Unlock after loop requires wakeup. */ +-10: +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- jmp 11b +- +- .size __pthread_cond_wait, .-__pthread_cond_wait +-versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, +- GLIBC_2_3_2) +- +- +- .type __condvar_w_cleanup2, @function +-__condvar_w_cleanup2: +- subl $cond_futex, %ebx +- .size __condvar_w_cleanup2, .-__condvar_w_cleanup2 +-.LSbl4: +- .type __condvar_w_cleanup, @function +-__condvar_w_cleanup: +- movl %eax, %esi +- +- /* Get internal lock. */ +- movl $1, %edx +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %edx, (%ebx) +-#else +- cmpxchgl %edx, cond_lock(%ebx) +-#endif +- jz 1f +- +-#if cond_lock == 0 +- movl %ebx, %edx +-#else +- leal cond_lock(%ebx), %edx +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_lock_wait +- +-1: movl broadcast_seq(%ebx), %eax +- cmpl 12(%esp), %eax +- jne 3f +- +- /* We increment the wakeup_seq counter only if it is lower than +- total_seq. If this is not the case the thread was woken and +- then canceled. 
In this case we ignore the signal. */ +- movl total_seq(%ebx), %eax +- movl total_seq+4(%ebx), %edi +- cmpl wakeup_seq+4(%ebx), %edi +- jb 6f +- ja 7f +- cmpl wakeup_seq(%ebx), %eax +- jbe 7f +- +-6: addl $1, wakeup_seq(%ebx) +- adcl $0, wakeup_seq+4(%ebx) +- addl $1, cond_futex(%ebx) +- +-7: addl $1, woken_seq(%ebx) +- adcl $0, woken_seq+4(%ebx) +- +-3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) +- +- /* Wake up a thread which wants to destroy the condvar object. */ +- xorl %edi, %edi +- movl total_seq(%ebx), %eax +- andl total_seq+4(%ebx), %eax +- cmpl $0xffffffff, %eax +- jne 4f +- movl cond_nwaiters(%ebx), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 4f +- +- addl $cond_nwaiters, %ebx +- movl $SYS_futex, %eax +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_nwaiters(%ebx) +- sete %cl +- subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE, %ecx +- movl $1, %edx +- ENTER_KERNEL +- subl $cond_nwaiters, %ebx +- movl $1, %edi +- +-4: LOCK +-#if cond_lock == 0 +- subl $1, (%ebx) +-#else +- subl $1, cond_lock(%ebx) +-#endif +- je 2f +- +-#if cond_lock == 0 +- movl %ebx, %eax +-#else +- leal cond_lock(%ebx), %eax +-#endif +-#if (LLL_SHARED-LLL_PRIVATE) > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex(%ebx) +- setne %cl +- subl $1, %ecx +- andl $(LLL_SHARED-LLL_PRIVATE), %ecx +-#if LLL_PRIVATE != 0 +- addl $LLL_PRIVATE, %ecx +-#endif +- call __lll_unlock_wake +- +- /* Wake up all waiters to make sure no signal gets lost. */ +-2: testl %edi, %edi +- jnz 5f +- addl $cond_futex, %ebx +-#if FUTEX_PRIVATE_FLAG > 255 +- xorl %ecx, %ecx +-#endif +- cmpl $-1, dep_mutex-cond_futex(%ebx) +- sete %cl +- subl $1, %ecx +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %ecx +-#else +- andl %gs:PRIVATE_FUTEX, %ecx +-#endif +- addl $FUTEX_WAKE, %ecx +- movl $SYS_futex, %eax +- movl $0x7fffffff, %edx +- ENTER_KERNEL +- +- /* Lock the mutex only if we don't own it already. This only happens +- in case of PI mutexes, if we got cancelled after a successful +- return of the futex syscall and before disabling async +- cancellation. */ +-5: movl 24+FRAME_SIZE(%esp), %eax +- movl MUTEX_KIND(%eax), %ebx +- andl $(ROBUST_BIT|PI_BIT), %ebx +- cmpl $PI_BIT, %ebx +- jne 8f +- +- movl (%eax), %ebx +- andl $TID_MASK, %ebx +- cmpl %ebx, %gs:TID +- jne 8f +- /* We managed to get the lock. Fix it up before returning. 
*/ +- call __pthread_mutex_cond_lock_adjust +- jmp 9f +- +-8: call __pthread_mutex_cond_lock +- +-9: movl %esi, (%esp) +-.LcallUR: +- call _Unwind_Resume +- hlt +-.LENDCODE: +- cfi_endproc +- .size __condvar_w_cleanup, .-__condvar_w_cleanup +- +- +- .section .gcc_except_table,"a",@progbits +-.LexceptSTART: +- .byte DW_EH_PE_omit # @LPStart format (omit) +- .byte DW_EH_PE_omit # @TType format (omit) +- .byte DW_EH_PE_sdata4 # call-site format +- # DW_EH_PE_sdata4 +- .uleb128 .Lcstend-.Lcstbegin +-.Lcstbegin: +- .long .LcleanupSTART-.LSTARTCODE +- .long .Ladd_cond_futex_pi-.LcleanupSTART +- .long __condvar_w_cleanup-.LSTARTCODE +- .uleb128 0 +- .long .Ladd_cond_futex_pi-.LSTARTCODE +- .long .Lsub_cond_futex_pi-.Ladd_cond_futex_pi +- .long __condvar_w_cleanup2-.LSTARTCODE +- .uleb128 0 +- .long .Lsub_cond_futex_pi-.LSTARTCODE +- .long .Ladd_cond_futex-.Lsub_cond_futex_pi +- .long __condvar_w_cleanup-.LSTARTCODE +- .uleb128 0 +- .long .Ladd_cond_futex-.LSTARTCODE +- .long .Lsub_cond_futex-.Ladd_cond_futex +- .long __condvar_w_cleanup2-.LSTARTCODE +- .uleb128 0 +- .long .Lsub_cond_futex-.LSTARTCODE +- .long .LcleanupEND-.Lsub_cond_futex +- .long __condvar_w_cleanup-.LSTARTCODE +- .uleb128 0 +- .long .LcallUR-.LSTARTCODE +- .long .LENDCODE-.LcallUR +- .long 0 +- .uleb128 0 +-.Lcstend: +- +-#ifdef SHARED +- .hidden DW.ref.__gcc_personality_v0 +- .weak DW.ref.__gcc_personality_v0 +- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits +- .align 4 +- .type DW.ref.__gcc_personality_v0, @object +- .size DW.ref.__gcc_personality_v0, 4 +-DW.ref.__gcc_personality_v0: +- .long __gcc_personality_v0 +-#endif +diff --git a/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h b/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h +index 345e79a..371bc3c 100644 +--- a/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h ++++ b/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h +@@ -123,19 +123,32 @@ typedef union + + + /* Data structure for conditional variable handling. The structure of +- the attribute type is deliberately not exposed. */ ++ the attribute type is not exposed on purpose. */ + typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S +deleted file mode 100644 +index de455dd..0000000 +--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S ++++ /dev/null +@@ -1,177 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. 
+- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. */ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <kernel-features.h> +-#include <pthread-pi-defines.h> +-#include <pthread-errnos.h> +-#include <stap-probe.h> +- +- .text +- +- /* int pthread_cond_broadcast (pthread_cond_t *cond) */ +-ENTRY(__pthread_cond_broadcast) +- +- LIBC_PROBE (cond_broadcast, 1, %rdi) +- +- /* Get internal lock. */ +- movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jnz 1f +- +-2: addq $cond_futex, %rdi +- movq total_seq-cond_futex(%rdi), %r9 +- cmpq wakeup_seq-cond_futex(%rdi), %r9 +- jna 4f +- +- /* Cause all currently waiting threads to recognize they are +- woken up. */ +- movq %r9, wakeup_seq-cond_futex(%rdi) +- movq %r9, woken_seq-cond_futex(%rdi) +- addq %r9, %r9 +- movl %r9d, (%rdi) +- incl broadcast_seq-cond_futex(%rdi) +- +- /* Get the address of the mutex used. */ +- mov dep_mutex-cond_futex(%rdi), %R8_LP +- +- /* Unlock. */ +- LOCK +- decl cond_lock-cond_futex(%rdi) +- jne 7f +- +-8: cmp $-1, %R8_LP +- je 9f +- +- /* Do not use requeue for pshared condvars. */ +- testl $PS_BIT, MUTEX_KIND(%r8) +- jne 9f +- +- /* Requeue to a PI mutex if the PI bit is set. */ +- movl MUTEX_KIND(%r8), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- je 81f +- +- /* Wake up all threads. */ +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %esi +-#else +- movl %fs:PRIVATE_FUTEX, %esi +- orl $FUTEX_CMP_REQUEUE, %esi +-#endif +- movl $SYS_futex, %eax +- movl $1, %edx +- movl $0x7fffffff, %r10d +- syscall +- +- /* For any kind of error, which mainly is EAGAIN, we try again +- with WAKE. The general test also covers running on old +- kernels. */ +- cmpq $-4095, %rax +- jae 9f +- +-10: xorl %eax, %eax +- retq +- +- /* Wake up all threads. */ +-81: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi +- movl $SYS_futex, %eax +- movl $1, %edx +- movl $0x7fffffff, %r10d +- syscall +- +- /* For any kind of error, which mainly is EAGAIN, we try again +- with WAKE. The general test also covers running on old +- kernels. */ +- cmpq $-4095, %rax +- jb 10b +- jmp 9f +- +- .align 16 +- /* Unlock. */ +-4: LOCK +- decl cond_lock-cond_futex(%rdi) +- jne 5f +- +-6: xorl %eax, %eax +- retq +- +- /* Initial locking failed. */ +-1: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +-#if cond_lock != 0 +- subq $cond_lock, %rdi +-#endif +- jmp 2b +- +- /* Unlock in loop requires wakeup. 
*/ +-5: addq $cond_lock-cond_futex, %rdi +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- jmp 6b +- +- /* Unlock in loop requires wakeup. */ +-7: addq $cond_lock-cond_futex, %rdi +- cmp $-1, %R8_LP +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- subq $cond_lock-cond_futex, %rdi +- jmp 8b +- +-9: /* The futex requeue functionality is not available. */ +- cmp $-1, %R8_LP +- movl $0x7fffffff, %edx +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAKE, %eax +- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi +- cmove %eax, %esi +-#else +- movl $0, %eax +- movl %fs:PRIVATE_FUTEX, %esi +- cmove %eax, %esi +- orl $FUTEX_WAKE, %esi +-#endif +- movl $SYS_futex, %eax +- syscall +- jmp 10b +-END(__pthread_cond_broadcast) +- +-versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast, +- GLIBC_2_3_2) +diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S +deleted file mode 100644 +index da14bc3..0000000 +--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S ++++ /dev/null +@@ -1,161 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. */ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <pthread-pi-defines.h> +-#include <kernel-features.h> +-#include <pthread-errnos.h> +-#include <stap-probe.h> +- +- +- .text +- +-ENTRY(__pthread_cond_signal) +- +- LIBC_PROBE (cond_signal, 1, %rdi) +- +- /* Get internal lock. */ +- movq %rdi, %r8 +- movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jnz 1f +- +-2: addq $cond_futex, %rdi +- movq total_seq(%r8), %rcx +- cmpq wakeup_seq(%r8), %rcx +- jbe 4f +- +- /* Bump the wakeup number. */ +- addq $1, wakeup_seq(%r8) +- addl $1, (%rdi) +- +- /* Wake up one thread. */ +- LP_OP(cmp) $-1, dep_mutex(%r8) +- movl $FUTEX_WAKE_OP, %esi +- movl $1, %edx +- movl $SYS_futex, %eax +- je 8f +- +- /* Get the address of the mutex used. */ +- mov dep_mutex(%r8), %RCX_LP +- movl MUTEX_KIND(%rcx), %r11d +- andl $(ROBUST_BIT|PI_BIT), %r11d +- cmpl $PI_BIT, %r11d +- je 9f +- +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $(FUTEX_WAKE_OP|FUTEX_PRIVATE_FLAG), %esi +-#else +- orl %fs:PRIVATE_FUTEX, %esi +-#endif +- +-8: movl $1, %r10d +-#if cond_lock != 0 +- addq $cond_lock, %r8 +-#endif +- movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %r9d +- syscall +-#if cond_lock != 0 +- subq $cond_lock, %r8 +-#endif +- /* For any kind of error, we try again with WAKE. +- The general test also covers running on old kernels. 
*/ +- cmpq $-4095, %rax +- jae 7f +- +- xorl %eax, %eax +- retq +- +- /* Wake up one thread and requeue none in the PI Mutex case. */ +-9: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi +- movq %rcx, %r8 +- xorq %r10, %r10 +- movl (%rdi), %r9d // XXX Can this be right? +- syscall +- +- leaq -cond_futex(%rdi), %r8 +- +- /* For any kind of error, we try again with WAKE. +- The general test also covers running on old kernels. */ +- cmpq $-4095, %rax +- jb 4f +- +-7: +-#ifdef __ASSUME_PRIVATE_FUTEX +- andl $FUTEX_PRIVATE_FLAG, %esi +-#else +- andl %fs:PRIVATE_FUTEX, %esi +-#endif +- orl $FUTEX_WAKE, %esi +- movl $SYS_futex, %eax +- /* %rdx should be 1 already from $FUTEX_WAKE_OP syscall. +- movl $1, %edx */ +- syscall +- +- /* Unlock. */ +-4: LOCK +-#if cond_lock == 0 +- decl (%r8) +-#else +- decl cond_lock(%r8) +-#endif +- jne 5f +- +-6: xorl %eax, %eax +- retq +- +- /* Initial locking failed. */ +-1: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +-#if cond_lock != 0 +- subq $cond_lock, %rdi +-#endif +- jmp 2b +- +- /* Unlock in loop requires wakeup. */ +-5: +- movq %r8, %rdi +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- jmp 6b +-END(__pthread_cond_signal) +- +-versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, +- GLIBC_2_3_2) +diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S +deleted file mode 100644 +index 82ffa1a..0000000 +--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S ++++ /dev/null +@@ -1,623 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. 
*/ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <pthread-pi-defines.h> +-#include <pthread-errnos.h> +-#include <stap-probe.h> +- +-#include <kernel-features.h> +- +- +- .text +- +- +-/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, +- const struct timespec *abstime) */ +- .globl __pthread_cond_timedwait +- .type __pthread_cond_timedwait, @function +- .align 16 +-__pthread_cond_timedwait: +-.LSTARTCODE: +- cfi_startproc +-#ifdef SHARED +- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, +- DW.ref.__gcc_personality_v0) +- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) +-#else +- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) +- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) +-#endif +- +- pushq %r12 +- cfi_adjust_cfa_offset(8) +- cfi_rel_offset(%r12, 0) +- pushq %r13 +- cfi_adjust_cfa_offset(8) +- cfi_rel_offset(%r13, 0) +- pushq %r14 +- cfi_adjust_cfa_offset(8) +- cfi_rel_offset(%r14, 0) +- pushq %r15 +- cfi_adjust_cfa_offset(8) +- cfi_rel_offset(%r15, 0) +-#define FRAME_SIZE (32+8) +- subq $FRAME_SIZE, %rsp +- cfi_adjust_cfa_offset(FRAME_SIZE) +- cfi_remember_state +- +- LIBC_PROBE (cond_timedwait, 3, %rdi, %rsi, %rdx) +- +- cmpq $1000000000, 8(%rdx) +- movl $EINVAL, %eax +- jae 48f +- +- /* Stack frame: +- +- rsp + 48 +- +--------------------------+ +- rsp + 32 | timeout value | +- +--------------------------+ +- rsp + 24 | old wake_seq value | +- +--------------------------+ +- rsp + 16 | mutex pointer | +- +--------------------------+ +- rsp + 8 | condvar pointer | +- +--------------------------+ +- rsp + 4 | old broadcast_seq value | +- +--------------------------+ +- rsp + 0 | old cancellation mode | +- +--------------------------+ +- */ +- +- LP_OP(cmp) $-1, dep_mutex(%rdi) +- +- /* Prepare structure passed to cancellation handler. */ +- movq %rdi, 8(%rsp) +- movq %rsi, 16(%rsp) +- movq %rdx, %r13 +- +- je 22f +- mov %RSI_LP, dep_mutex(%rdi) +- +-22: +- xorb %r15b, %r15b +- +- /* Get internal lock. */ +- movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jnz 31f +- +- /* Unlock the mutex. */ +-32: movq 16(%rsp), %rdi +- xorl %esi, %esi +- callq __pthread_mutex_unlock_usercnt +- +- testl %eax, %eax +- jne 46f +- +- movq 8(%rsp), %rdi +- incq total_seq(%rdi) +- incl cond_futex(%rdi) +- addl $(1 << nwaiters_shift), cond_nwaiters(%rdi) +- +- /* Get and store current wakeup_seq value. */ +- movq 8(%rsp), %rdi +- movq wakeup_seq(%rdi), %r9 +- movl broadcast_seq(%rdi), %edx +- movq %r9, 24(%rsp) +- movl %edx, 4(%rsp) +- +- cmpq $0, (%r13) +- movq $-ETIMEDOUT, %r14 +- js 36f +- +-38: movl cond_futex(%rdi), %r12d +- +- /* Unlock. */ +- LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- jne 33f +- +-.LcleanupSTART1: +-34: callq __pthread_enable_asynccancel +- movl %eax, (%rsp) +- +- movq %r13, %r10 +- movl $FUTEX_WAIT_BITSET, %esi +- LP_OP(cmp) $-1, dep_mutex(%rdi) +- je 60f +- +- mov dep_mutex(%rdi), %R8_LP +- /* Requeue to a non-robust PI mutex if the PI bit is set and +- the robust bit is not set. */ +- movl MUTEX_KIND(%r8), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- jne 61f +- +- movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi +- xorl %eax, %eax +- /* The following only works like this because we only support +- two clocks, represented using a single bit. 
*/ +- testl $1, cond_nwaiters(%rdi) +- movl $FUTEX_CLOCK_REALTIME, %edx +- cmove %edx, %eax +- orl %eax, %esi +- movq %r12, %rdx +- addq $cond_futex, %rdi +- movl $SYS_futex, %eax +- syscall +- +- cmpl $0, %eax +- sete %r15b +- +-#ifdef __ASSUME_REQUEUE_PI +- jmp 62f +-#else +- je 62f +- +- /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns +- successfully, it has already locked the mutex for us and the +- pi_flag (%r15b) is set to denote that fact. However, if another +- thread changed the futex value before we entered the wait, the +- syscall may return an EAGAIN and the mutex is not locked. We go +- ahead with a success anyway since later we look at the pi_flag to +- decide if we got the mutex or not. The sequence numbers then make +- sure that only one of the threads actually wake up. We retry using +- normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal +- and PI futexes don't mix. +- +- Note that we don't check for EAGAIN specifically; we assume that the +- only other error the futex function could return is EAGAIN (barring +- the ETIMEOUT of course, for the timeout case in futex) since +- anything else would mean an error in our function. It is too +- expensive to do that check for every call (which is quite common in +- case of a large number of threads), so it has been skipped. */ +- cmpl $-ENOSYS, %eax +- jne 62f +- +- subq $cond_futex, %rdi +-#endif +- +-61: movl $(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi +-60: xorb %r15b, %r15b +- xorl %eax, %eax +- /* The following only works like this because we only support +- two clocks, represented using a single bit. */ +- testl $1, cond_nwaiters(%rdi) +- movl $FUTEX_CLOCK_REALTIME, %edx +- movl $0xffffffff, %r9d +- cmove %edx, %eax +- orl %eax, %esi +- movq %r12, %rdx +- addq $cond_futex, %rdi +- movl $SYS_futex, %eax +- syscall +-62: movq %rax, %r14 +- +- movl (%rsp), %edi +- callq __pthread_disable_asynccancel +-.LcleanupEND1: +- +- /* Lock. */ +- movq 8(%rsp), %rdi +- movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jne 35f +- +-36: movl broadcast_seq(%rdi), %edx +- +- movq woken_seq(%rdi), %rax +- +- movq wakeup_seq(%rdi), %r9 +- +- cmpl 4(%rsp), %edx +- jne 53f +- +- cmpq 24(%rsp), %r9 +- jbe 45f +- +- cmpq %rax, %r9 +- ja 39f +- +-45: cmpq $-ETIMEDOUT, %r14 +- je 99f +- +- /* We need to go back to futex_wait. If we're using requeue_pi, then +- release the mutex we had acquired and go back. */ +- test %r15b, %r15b +- jz 38b +- +- /* Adjust the mutex values first and then unlock it. The unlock +- should always succeed or else the kernel did not lock the +- mutex correctly. */ +- movq %r8, %rdi +- callq __pthread_mutex_cond_lock_adjust +- xorl %esi, %esi +- callq __pthread_mutex_unlock_usercnt +- /* Reload cond_var. */ +- movq 8(%rsp), %rdi +- jmp 38b +- +-99: incq wakeup_seq(%rdi) +- incl cond_futex(%rdi) +- movl $ETIMEDOUT, %r14d +- jmp 44f +- +-53: xorq %r14, %r14 +- jmp 54f +- +-39: xorq %r14, %r14 +-44: incq woken_seq(%rdi) +- +-54: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi) +- +- /* Wake up a thread which wants to destroy the condvar object. 
*/ +- cmpq $0xffffffffffffffff, total_seq(%rdi) +- jne 55f +- movl cond_nwaiters(%rdi), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 55f +- +- addq $cond_nwaiters, %rdi +- LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi) +- movl $1, %edx +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAKE, %eax +- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi +- cmove %eax, %esi +-#else +- movl $0, %eax +- movl %fs:PRIVATE_FUTEX, %esi +- cmove %eax, %esi +- orl $FUTEX_WAKE, %esi +-#endif +- movl $SYS_futex, %eax +- syscall +- subq $cond_nwaiters, %rdi +- +-55: LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- jne 40f +- +- /* If requeue_pi is used the kernel performs the locking of the +- mutex. */ +-41: movq 16(%rsp), %rdi +- testb %r15b, %r15b +- jnz 64f +- +- callq __pthread_mutex_cond_lock +- +-63: testq %rax, %rax +- cmoveq %r14, %rax +- +-48: addq $FRAME_SIZE, %rsp +- cfi_adjust_cfa_offset(-FRAME_SIZE) +- popq %r15 +- cfi_adjust_cfa_offset(-8) +- cfi_restore(%r15) +- popq %r14 +- cfi_adjust_cfa_offset(-8) +- cfi_restore(%r14) +- popq %r13 +- cfi_adjust_cfa_offset(-8) +- cfi_restore(%r13) +- popq %r12 +- cfi_adjust_cfa_offset(-8) +- cfi_restore(%r12) +- +- retq +- +- cfi_restore_state +- +-64: callq __pthread_mutex_cond_lock_adjust +- movq %r14, %rax +- jmp 48b +- +- /* Initial locking failed. */ +-31: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +- jmp 32b +- +- /* Unlock in loop requires wakeup. */ +-33: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- jmp 34b +- +- /* Locking in loop failed. */ +-35: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +-#if cond_lock != 0 +- subq $cond_lock, %rdi +-#endif +- jmp 36b +- +- /* Unlock after loop requires wakeup. */ +-40: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- jmp 41b +- +- /* The initial unlocking of the mutex failed. 
*/ +-46: movq 8(%rsp), %rdi +- movq %rax, (%rsp) +- LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- jne 47f +- +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- +-47: movq (%rsp), %rax +- jmp 48b +- +- .size __pthread_cond_timedwait, .-__pthread_cond_timedwait +-versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, +- GLIBC_2_3_2) +- +- +- .align 16 +- .type __condvar_cleanup2, @function +-__condvar_cleanup2: +- /* Stack frame: +- +- rsp + 72 +- +--------------------------+ +- rsp + 64 | %r12 | +- +--------------------------+ +- rsp + 56 | %r13 | +- +--------------------------+ +- rsp + 48 | %r14 | +- +--------------------------+ +- rsp + 24 | unused | +- +--------------------------+ +- rsp + 16 | mutex pointer | +- +--------------------------+ +- rsp + 8 | condvar pointer | +- +--------------------------+ +- rsp + 4 | old broadcast_seq value | +- +--------------------------+ +- rsp + 0 | old cancellation mode | +- +--------------------------+ +- */ +- +- movq %rax, 24(%rsp) +- +- /* Get internal lock. */ +- movq 8(%rsp), %rdi +- movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jz 1f +- +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +-#if cond_lock != 0 +- subq $cond_lock, %rdi +-#endif +- +-1: movl broadcast_seq(%rdi), %edx +- cmpl 4(%rsp), %edx +- jne 3f +- +- /* We increment the wakeup_seq counter only if it is lower than +- total_seq. If this is not the case the thread was woken and +- then canceled. In this case we ignore the signal. */ +- movq total_seq(%rdi), %rax +- cmpq wakeup_seq(%rdi), %rax +- jbe 6f +- incq wakeup_seq(%rdi) +- incl cond_futex(%rdi) +-6: incq woken_seq(%rdi) +- +-3: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi) +- +- /* Wake up a thread which wants to destroy the condvar object. */ +- xorq %r12, %r12 +- cmpq $0xffffffffffffffff, total_seq(%rdi) +- jne 4f +- movl cond_nwaiters(%rdi), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 4f +- +- LP_OP(cmp) $-1, dep_mutex(%rdi) +- leaq cond_nwaiters(%rdi), %rdi +- movl $1, %edx +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAKE, %eax +- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi +- cmove %eax, %esi +-#else +- movl $0, %eax +- movl %fs:PRIVATE_FUTEX, %esi +- cmove %eax, %esi +- orl $FUTEX_WAKE, %esi +-#endif +- movl $SYS_futex, %eax +- syscall +- subq $cond_nwaiters, %rdi +- movl $1, %r12d +- +-4: LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- je 2f +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- +- /* Wake up all waiters to make sure no signal gets lost. 
*/ +-2: testq %r12, %r12 +- jnz 5f +- addq $cond_futex, %rdi +- LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi) +- movl $0x7fffffff, %edx +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAKE, %eax +- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi +- cmove %eax, %esi +-#else +- movl $0, %eax +- movl %fs:PRIVATE_FUTEX, %esi +- cmove %eax, %esi +- orl $FUTEX_WAKE, %esi +-#endif +- movl $SYS_futex, %eax +- syscall +- +- /* Lock the mutex only if we don't own it already. This only happens +- in case of PI mutexes, if we got cancelled after a successful +- return of the futex syscall and before disabling async +- cancellation. */ +-5: movq 16(%rsp), %rdi +- movl MUTEX_KIND(%rdi), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- jne 7f +- +- movl (%rdi), %eax +- andl $TID_MASK, %eax +- cmpl %eax, %fs:TID +- jne 7f +- /* We managed to get the lock. Fix it up before returning. */ +- callq __pthread_mutex_cond_lock_adjust +- jmp 8f +- +-7: callq __pthread_mutex_cond_lock +- +-8: movq 24(%rsp), %rdi +- movq FRAME_SIZE(%rsp), %r15 +- movq FRAME_SIZE+8(%rsp), %r14 +- movq FRAME_SIZE+16(%rsp), %r13 +- movq FRAME_SIZE+24(%rsp), %r12 +-.LcallUR: +- call _Unwind_Resume +- hlt +-.LENDCODE: +- cfi_endproc +- .size __condvar_cleanup2, .-__condvar_cleanup2 +- +- +- .section .gcc_except_table,"a",@progbits +-.LexceptSTART: +- .byte DW_EH_PE_omit # @LPStart format +- .byte DW_EH_PE_omit # @TType format +- .byte DW_EH_PE_uleb128 # call-site format +- .uleb128 .Lcstend-.Lcstbegin +-.Lcstbegin: +- .uleb128 .LcleanupSTART1-.LSTARTCODE +- .uleb128 .LcleanupEND1-.LcleanupSTART1 +- .uleb128 __condvar_cleanup2-.LSTARTCODE +- .uleb128 0 +- .uleb128 .LcallUR-.LSTARTCODE +- .uleb128 .LENDCODE-.LcallUR +- .uleb128 0 +- .uleb128 0 +-.Lcstend: +- +- +-#ifdef SHARED +- .hidden DW.ref.__gcc_personality_v0 +- .weak DW.ref.__gcc_personality_v0 +- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits +- .align LP_SIZE +- .type DW.ref.__gcc_personality_v0, @object +- .size DW.ref.__gcc_personality_v0, LP_SIZE +-DW.ref.__gcc_personality_v0: +- ASM_ADDR __gcc_personality_v0 +-#endif +diff --git a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S +deleted file mode 100644 +index c82f37b..0000000 +--- a/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S ++++ /dev/null +@@ -1,555 +0,0 @@ +-/* Copyright (C) 2002-2016 Free Software Foundation, Inc. +- This file is part of the GNU C Library. +- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002. +- +- The GNU C Library is free software; you can redistribute it and/or +- modify it under the terms of the GNU Lesser General Public +- License as published by the Free Software Foundation; either +- version 2.1 of the License, or (at your option) any later version. +- +- The GNU C Library is distributed in the hope that it will be useful, +- but WITHOUT ANY WARRANTY; without even the implied warranty of +- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- Lesser General Public License for more details. +- +- You should have received a copy of the GNU Lesser General Public +- License along with the GNU C Library; if not, see +- <http://www.gnu.org/licenses/>. 
*/ +- +-#include <sysdep.h> +-#include <shlib-compat.h> +-#include <lowlevellock.h> +-#include <lowlevelcond.h> +-#include <tcb-offsets.h> +-#include <pthread-pi-defines.h> +-#include <pthread-errnos.h> +-#include <stap-probe.h> +- +-#include <kernel-features.h> +- +- +- .text +- +-/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */ +- .globl __pthread_cond_wait +- .type __pthread_cond_wait, @function +- .align 16 +-__pthread_cond_wait: +-.LSTARTCODE: +- cfi_startproc +-#ifdef SHARED +- cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, +- DW.ref.__gcc_personality_v0) +- cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) +-#else +- cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) +- cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) +-#endif +- +-#define FRAME_SIZE (32+8) +- leaq -FRAME_SIZE(%rsp), %rsp +- cfi_adjust_cfa_offset(FRAME_SIZE) +- +- /* Stack frame: +- +- rsp + 32 +- +--------------------------+ +- rsp + 24 | old wake_seq value | +- +--------------------------+ +- rsp + 16 | mutex pointer | +- +--------------------------+ +- rsp + 8 | condvar pointer | +- +--------------------------+ +- rsp + 4 | old broadcast_seq value | +- +--------------------------+ +- rsp + 0 | old cancellation mode | +- +--------------------------+ +- */ +- +- LIBC_PROBE (cond_wait, 2, %rdi, %rsi) +- +- LP_OP(cmp) $-1, dep_mutex(%rdi) +- +- /* Prepare structure passed to cancellation handler. */ +- movq %rdi, 8(%rsp) +- movq %rsi, 16(%rsp) +- +- je 15f +- mov %RSI_LP, dep_mutex(%rdi) +- +- /* Get internal lock. */ +-15: movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jne 1f +- +- /* Unlock the mutex. */ +-2: movq 16(%rsp), %rdi +- xorl %esi, %esi +- callq __pthread_mutex_unlock_usercnt +- +- testl %eax, %eax +- jne 12f +- +- movq 8(%rsp), %rdi +- incq total_seq(%rdi) +- incl cond_futex(%rdi) +- addl $(1 << nwaiters_shift), cond_nwaiters(%rdi) +- +- /* Get and store current wakeup_seq value. */ +- movq 8(%rsp), %rdi +- movq wakeup_seq(%rdi), %r9 +- movl broadcast_seq(%rdi), %edx +- movq %r9, 24(%rsp) +- movl %edx, 4(%rsp) +- +- /* Unlock. */ +-8: movl cond_futex(%rdi), %edx +- LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- jne 3f +- +-.LcleanupSTART: +-4: callq __pthread_enable_asynccancel +- movl %eax, (%rsp) +- +- xorq %r10, %r10 +- LP_OP(cmp) $-1, dep_mutex(%rdi) +- leaq cond_futex(%rdi), %rdi +- movl $FUTEX_WAIT, %esi +- je 60f +- +- mov dep_mutex-cond_futex(%rdi), %R8_LP +- /* Requeue to a non-robust PI mutex if the PI bit is set and +- the robust bit is not set. */ +- movl MUTEX_KIND(%r8), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- jne 61f +- +- movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi +- movl $SYS_futex, %eax +- syscall +- +- cmpl $0, %eax +- sete %r8b +- +-#ifdef __ASSUME_REQUEUE_PI +- jmp 62f +-#else +- je 62f +- +- /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns +- successfully, it has already locked the mutex for us and the +- pi_flag (%r8b) is set to denote that fact. However, if another +- thread changed the futex value before we entered the wait, the +- syscall may return an EAGAIN and the mutex is not locked. We go +- ahead with a success anyway since later we look at the pi_flag to +- decide if we got the mutex or not. The sequence numbers then make +- sure that only one of the threads actually wake up. 
We retry using +- normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal +- and PI futexes don't mix. +- +- Note that we don't check for EAGAIN specifically; we assume that the +- only other error the futex function could return is EAGAIN since +- anything else would mean an error in our function. It is too +- expensive to do that check for every call (which is quite common in +- case of a large number of threads), so it has been skipped. */ +- cmpl $-ENOSYS, %eax +- jne 62f +- +-# ifndef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAIT, %esi +-# endif +-#endif +- +-61: +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi +-#else +- orl %fs:PRIVATE_FUTEX, %esi +-#endif +-60: xorb %r8b, %r8b +- movl $SYS_futex, %eax +- syscall +- +-62: movl (%rsp), %edi +- callq __pthread_disable_asynccancel +-.LcleanupEND: +- +- /* Lock. */ +- movq 8(%rsp), %rdi +- movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jnz 5f +- +-6: movl broadcast_seq(%rdi), %edx +- +- movq woken_seq(%rdi), %rax +- +- movq wakeup_seq(%rdi), %r9 +- +- cmpl 4(%rsp), %edx +- jne 16f +- +- cmpq 24(%rsp), %r9 +- jbe 19f +- +- cmpq %rax, %r9 +- jna 19f +- +- incq woken_seq(%rdi) +- +- /* Unlock */ +-16: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi) +- +- /* Wake up a thread which wants to destroy the condvar object. */ +- cmpq $0xffffffffffffffff, total_seq(%rdi) +- jne 17f +- movl cond_nwaiters(%rdi), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 17f +- +- addq $cond_nwaiters, %rdi +- LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi) +- movl $1, %edx +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAKE, %eax +- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi +- cmove %eax, %esi +-#else +- movl $0, %eax +- movl %fs:PRIVATE_FUTEX, %esi +- cmove %eax, %esi +- orl $FUTEX_WAKE, %esi +-#endif +- movl $SYS_futex, %eax +- syscall +- subq $cond_nwaiters, %rdi +- +-17: LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- jne 10f +- +- /* If requeue_pi is used the kernel performs the locking of the +- mutex. */ +-11: movq 16(%rsp), %rdi +- testb %r8b, %r8b +- jnz 18f +- +- callq __pthread_mutex_cond_lock +- +-14: leaq FRAME_SIZE(%rsp), %rsp +- cfi_adjust_cfa_offset(-FRAME_SIZE) +- +- /* We return the result of the mutex_lock operation. */ +- retq +- +- cfi_adjust_cfa_offset(FRAME_SIZE) +- +-18: callq __pthread_mutex_cond_lock_adjust +- xorl %eax, %eax +- jmp 14b +- +- /* We need to go back to futex_wait. If we're using requeue_pi, then +- release the mutex we had acquired and go back. */ +-19: testb %r8b, %r8b +- jz 8b +- +- /* Adjust the mutex values first and then unlock it. The unlock +- should always succeed or else the kernel did not lock the mutex +- correctly. */ +- movq 16(%rsp), %rdi +- callq __pthread_mutex_cond_lock_adjust +- movq %rdi, %r8 +- xorl %esi, %esi +- callq __pthread_mutex_unlock_usercnt +- /* Reload cond_var. */ +- movq 8(%rsp), %rdi +- jmp 8b +- +- /* Initial locking failed. */ +-1: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +- jmp 2b +- +- /* Unlock in loop requires wakeup. */ +-3: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- /* The call preserves %rdx. 
*/ +- callq __lll_unlock_wake +-#if cond_lock != 0 +- subq $cond_lock, %rdi +-#endif +- jmp 4b +- +- /* Locking in loop failed. */ +-5: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +-#if cond_lock != 0 +- subq $cond_lock, %rdi +-#endif +- jmp 6b +- +- /* Unlock after loop requires wakeup. */ +-10: +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- jmp 11b +- +- /* The initial unlocking of the mutex failed. */ +-12: movq %rax, %r10 +- movq 8(%rsp), %rdi +- LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- je 13f +- +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_unlock_wake +- +-13: movq %r10, %rax +- jmp 14b +- +- .size __pthread_cond_wait, .-__pthread_cond_wait +-versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, +- GLIBC_2_3_2) +- +- +- .align 16 +- .type __condvar_cleanup1, @function +- .globl __condvar_cleanup1 +- .hidden __condvar_cleanup1 +-__condvar_cleanup1: +- /* Stack frame: +- +- rsp + 32 +- +--------------------------+ +- rsp + 24 | unused | +- +--------------------------+ +- rsp + 16 | mutex pointer | +- +--------------------------+ +- rsp + 8 | condvar pointer | +- +--------------------------+ +- rsp + 4 | old broadcast_seq value | +- +--------------------------+ +- rsp + 0 | old cancellation mode | +- +--------------------------+ +- */ +- +- movq %rax, 24(%rsp) +- +- /* Get internal lock. */ +- movq 8(%rsp), %rdi +- movl $1, %esi +- xorl %eax, %eax +- LOCK +-#if cond_lock == 0 +- cmpxchgl %esi, (%rdi) +-#else +- cmpxchgl %esi, cond_lock(%rdi) +-#endif +- jz 1f +- +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- callq __lll_lock_wait +-#if cond_lock != 0 +- subq $cond_lock, %rdi +-#endif +- +-1: movl broadcast_seq(%rdi), %edx +- cmpl 4(%rsp), %edx +- jne 3f +- +- /* We increment the wakeup_seq counter only if it is lower than +- total_seq. If this is not the case the thread was woken and +- then canceled. In this case we ignore the signal. */ +- movq total_seq(%rdi), %rax +- cmpq wakeup_seq(%rdi), %rax +- jbe 6f +- incq wakeup_seq(%rdi) +- incl cond_futex(%rdi) +-6: incq woken_seq(%rdi) +- +-3: subl $(1 << nwaiters_shift), cond_nwaiters(%rdi) +- +- /* Wake up a thread which wants to destroy the condvar object. 
*/ +- xorl %ecx, %ecx +- cmpq $0xffffffffffffffff, total_seq(%rdi) +- jne 4f +- movl cond_nwaiters(%rdi), %eax +- andl $~((1 << nwaiters_shift) - 1), %eax +- jne 4f +- +- LP_OP(cmp) $-1, dep_mutex(%rdi) +- leaq cond_nwaiters(%rdi), %rdi +- movl $1, %edx +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAKE, %eax +- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi +- cmove %eax, %esi +-#else +- movl $0, %eax +- movl %fs:PRIVATE_FUTEX, %esi +- cmove %eax, %esi +- orl $FUTEX_WAKE, %esi +-#endif +- movl $SYS_futex, %eax +- syscall +- subq $cond_nwaiters, %rdi +- movl $1, %ecx +- +-4: LOCK +-#if cond_lock == 0 +- decl (%rdi) +-#else +- decl cond_lock(%rdi) +-#endif +- je 2f +-#if cond_lock != 0 +- addq $cond_lock, %rdi +-#endif +- LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi) +- movl $LLL_PRIVATE, %eax +- movl $LLL_SHARED, %esi +- cmovne %eax, %esi +- /* The call preserves %rcx. */ +- callq __lll_unlock_wake +- +- /* Wake up all waiters to make sure no signal gets lost. */ +-2: testl %ecx, %ecx +- jnz 5f +- addq $cond_futex, %rdi +- LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi) +- movl $0x7fffffff, %edx +-#ifdef __ASSUME_PRIVATE_FUTEX +- movl $FUTEX_WAKE, %eax +- movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi +- cmove %eax, %esi +-#else +- movl $0, %eax +- movl %fs:PRIVATE_FUTEX, %esi +- cmove %eax, %esi +- orl $FUTEX_WAKE, %esi +-#endif +- movl $SYS_futex, %eax +- syscall +- +- /* Lock the mutex only if we don't own it already. This only happens +- in case of PI mutexes, if we got cancelled after a successful +- return of the futex syscall and before disabling async +- cancellation. */ +-5: movq 16(%rsp), %rdi +- movl MUTEX_KIND(%rdi), %eax +- andl $(ROBUST_BIT|PI_BIT), %eax +- cmpl $PI_BIT, %eax +- jne 7f +- +- movl (%rdi), %eax +- andl $TID_MASK, %eax +- cmpl %eax, %fs:TID +- jne 7f +- /* We managed to get the lock. Fix it up before returning. 
*/ +- callq __pthread_mutex_cond_lock_adjust +- jmp 8f +- +- +-7: callq __pthread_mutex_cond_lock +- +-8: movq 24(%rsp), %rdi +-.LcallUR: +- call _Unwind_Resume +- hlt +-.LENDCODE: +- cfi_endproc +- .size __condvar_cleanup1, .-__condvar_cleanup1 +- +- +- .section .gcc_except_table,"a",@progbits +-.LexceptSTART: +- .byte DW_EH_PE_omit # @LPStart format +- .byte DW_EH_PE_omit # @TType format +- .byte DW_EH_PE_uleb128 # call-site format +- .uleb128 .Lcstend-.Lcstbegin +-.Lcstbegin: +- .uleb128 .LcleanupSTART-.LSTARTCODE +- .uleb128 .LcleanupEND-.LcleanupSTART +- .uleb128 __condvar_cleanup1-.LSTARTCODE +- .uleb128 0 +- .uleb128 .LcallUR-.LSTARTCODE +- .uleb128 .LENDCODE-.LcallUR +- .uleb128 0 +- .uleb128 0 +-.Lcstend: +- +- +-#ifdef SHARED +- .hidden DW.ref.__gcc_personality_v0 +- .weak DW.ref.__gcc_personality_v0 +- .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits +- .align LP_SIZE +- .type DW.ref.__gcc_personality_v0, @object +- .size DW.ref.__gcc_personality_v0, LP_SIZE +-DW.ref.__gcc_personality_v0: +- ASM_ADDR __gcc_personality_v0 +-#endif +diff --git a/sysdeps/x86/bits/pthreadtypes.h b/sysdeps/x86/bits/pthreadtypes.h +index 16b8f4f..a3a738f 100644 +--- a/sysdeps/x86/bits/pthreadtypes.h ++++ b/sysdeps/x86/bits/pthreadtypes.h +@@ -140,14 +140,27 @@ typedef union + { + struct + { +- int __lock; +- unsigned int __futex; +- __extension__ unsigned long long int __total_seq; +- __extension__ unsigned long long int __wakeup_seq; +- __extension__ unsigned long long int __woken_seq; +- void *__mutex; +- unsigned int __nwaiters; +- unsigned int __broadcast_seq; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __wseq; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __wseq32; ++ }; ++ __extension__ union ++ { ++ __extension__ unsigned long long int __g1_start; ++ struct { ++ unsigned int __low; ++ unsigned int __high; ++ } __g1_start32; ++ }; ++ unsigned int __g_refs[2]; ++ unsigned int __g_size[2]; ++ unsigned int __g1_orig_size; ++ unsigned int __wrefs; ++ unsigned int __g_signals[2]; + } __data; + char __size[__SIZEOF_PTHREAD_COND_T]; + __extension__ long long int __align; +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0005-Remove-__ASSUME_REQUEUE_PI.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0005-Remove-__ASSUME_REQUEUE_PI.patch new file mode 100644 index 000000000..8d4ba4107 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0005-Remove-__ASSUME_REQUEUE_PI.patch @@ -0,0 +1,149 @@ +From 27b7131d3d8133bf3a5ce72d4e4ff4dfadd71f20 Mon Sep 17 00:00:00 2001 +From: Catalin Enache <catalin.enache@windriver.com> +Date: Fri, 30 Jun 2017 12:08:29 +0300 +Subject: [PATCH 5/6] Remove __ASSUME_REQUEUE_PI + +The new cond var implementation (ed19993b5b0d) removed all the +__ASSUME_{REQUEUE_PI,FUTEX_LOCK_PI} internal usage so there is no +need to keep defining it. This patch removes all USE_REQUEUE_PI +and __ASSUME_REQUEUE_PI. It is as follow up from BZ#18463. + +Checked with a build for x86_64-linux-gnu, arm-linux-gnueabhf, +m68-linux-gnu, mips64-linux-gnu, and sparc64-linux-gnu. + + * nptl/pthreadP.h (USE_REQUEUE_PI): Remove ununsed macro. + * sysdeps/unix/sysv/linux/arm/kernel-features.h + (__ASSUME_REQUEUE_PI): Likewise. + * sysdeps/unix/sysv/linux/kernel-features.h + (__ASSUME_REQUEUE_PI): Likewise. + * sysdeps/unix/sysv/linux/m68k/kernel-features.h + (__ASSUME_REQUEUE_PI): Likewise. + * sysdeps/unix/sysv/linux/mips/kernel-features.h + (__ASSUME_REQUEUE_PI): Likewise. 
+ * sysdeps/unix/sysv/linux/sparc/kernel-features.h + (__ASSUME_REQUEUE_PI): Likewise. + +Upstream-Status: Backport + +Author: Adhemerval Zanella <adhemerval.zanella@linaro.org> +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + ChangeLog | 14 ++++++++++++++ + nptl/pthreadP.h | 12 ------------ + sysdeps/unix/sysv/linux/arm/kernel-features.h | 1 - + sysdeps/unix/sysv/linux/kernel-features.h | 5 ----- + sysdeps/unix/sysv/linux/m68k/kernel-features.h | 1 - + sysdeps/unix/sysv/linux/mips/kernel-features.h | 1 - + sysdeps/unix/sysv/linux/sparc/kernel-features.h | 1 - + 7 files changed, 14 insertions(+), 21 deletions(-) + +diff --git a/ChangeLog b/ChangeLog +index c94db7b..44c518b 100644 +--- a/ChangeLog ++++ b/ChangeLog +@@ -1,3 +1,17 @@ ++2017-04-04 Adhemerval Zanella <adhemerval.zanella@linaro.org> ++ ++ * nptl/pthreadP.h (USE_REQUEUE_PI): Remove ununsed macro. ++ * sysdeps/unix/sysv/linux/arm/kernel-features.h ++ (__ASSUME_REQUEUE_PI): Likewise. ++ * sysdeps/unix/sysv/linux/kernel-features.h ++ (__ASSUME_REQUEUE_PI): Likewise. ++ * sysdeps/unix/sysv/linux/m68k/kernel-features.h ++ (__ASSUME_REQUEUE_PI): Likewise. ++ * sysdeps/unix/sysv/linux/mips/kernel-features.h ++ (__ASSUME_REQUEUE_PI): Likewise. ++ * sysdeps/unix/sysv/linux/sparc/kernel-features.h ++ (__ASSUME_REQUEUE_PI): Likewise. ++ + 2016-12-31 Torvald Riegel <triegel@redhat.com> + + [BZ #13165] +diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h +index e9992bc..730c4ad 100644 +--- a/nptl/pthreadP.h ++++ b/nptl/pthreadP.h +@@ -594,18 +594,6 @@ extern void __wait_lookup_done (void) attribute_hidden; + # define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name); + #endif + +-/* Test if the mutex is suitable for the FUTEX_WAIT_REQUEUE_PI operation. */ +-#if (defined lll_futex_wait_requeue_pi \ +- && defined __ASSUME_REQUEUE_PI) +-# define USE_REQUEUE_PI(mut) \ +- ((mut) && (mut) != (void *) ~0l \ +- && (((mut)->__data.__kind \ +- & (PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP)) \ +- == PTHREAD_MUTEX_PRIO_INHERIT_NP)) +-#else +-# define USE_REQUEUE_PI(mut) 0 +-#endif +- + /* Returns 0 if POL is a valid scheduling policy. */ + static inline int + check_sched_policy_attr (int pol) +diff --git a/sysdeps/unix/sysv/linux/arm/kernel-features.h b/sysdeps/unix/sysv/linux/arm/kernel-features.h +index 6ca607e..339ad45 100644 +--- a/sysdeps/unix/sysv/linux/arm/kernel-features.h ++++ b/sysdeps/unix/sysv/linux/arm/kernel-features.h +@@ -23,7 +23,6 @@ + futex_atomic_cmpxchg_inatomic, depending on kernel + configuration. */ + #if __LINUX_KERNEL_VERSION < 0x030E03 +-# undef __ASSUME_REQUEUE_PI + # undef __ASSUME_SET_ROBUST_LIST + #endif + +diff --git a/sysdeps/unix/sysv/linux/kernel-features.h b/sysdeps/unix/sysv/linux/kernel-features.h +index 1d3b554..9f2cf9f 100644 +--- a/sysdeps/unix/sysv/linux/kernel-features.h ++++ b/sysdeps/unix/sysv/linux/kernel-features.h +@@ -101,11 +101,6 @@ + #define __ASSUME_PREADV 1 + #define __ASSUME_PWRITEV 1 + +-/* Support for FUTEX_*_REQUEUE_PI was added in 2.6.31 (but some +- architectures lack futex_atomic_cmpxchg_inatomic in some +- configurations). */ +-#define __ASSUME_REQUEUE_PI 1 +- + /* Support for recvmmsg functionality was added in 2.6.33. The macros + defined correspond to those for accept4. 
*/ + #if __LINUX_KERNEL_VERSION >= 0x020621 +diff --git a/sysdeps/unix/sysv/linux/m68k/kernel-features.h b/sysdeps/unix/sysv/linux/m68k/kernel-features.h +index 46ec601..174c1c6 100644 +--- a/sysdeps/unix/sysv/linux/m68k/kernel-features.h ++++ b/sysdeps/unix/sysv/linux/m68k/kernel-features.h +@@ -51,6 +51,5 @@ + + /* No support for PI futexes or robust mutexes before 3.10 for m68k. */ + #if __LINUX_KERNEL_VERSION < 0x030a00 +-# undef __ASSUME_REQUEUE_PI + # undef __ASSUME_SET_ROBUST_LIST + #endif +diff --git a/sysdeps/unix/sysv/linux/mips/kernel-features.h b/sysdeps/unix/sysv/linux/mips/kernel-features.h +index b486d90..a795911c 100644 +--- a/sysdeps/unix/sysv/linux/mips/kernel-features.h ++++ b/sysdeps/unix/sysv/linux/mips/kernel-features.h +@@ -24,7 +24,6 @@ + /* The MIPS kernel does not support futex_atomic_cmpxchg_inatomic if + emulating LL/SC. */ + #if __mips == 1 || defined _MIPS_ARCH_R5900 +-# undef __ASSUME_REQUEUE_PI + # undef __ASSUME_SET_ROBUST_LIST + #endif + +diff --git a/sysdeps/unix/sysv/linux/sparc/kernel-features.h b/sysdeps/unix/sysv/linux/sparc/kernel-features.h +index 69c9c7c..dd3ddf0 100644 +--- a/sysdeps/unix/sysv/linux/sparc/kernel-features.h ++++ b/sysdeps/unix/sysv/linux/sparc/kernel-features.h +@@ -34,6 +34,5 @@ + /* 32-bit SPARC kernels do not support + futex_atomic_cmpxchg_inatomic. */ + #if !defined __arch64__ && !defined __sparc_v9__ +-# undef __ASSUME_REQUEUE_PI + # undef __ASSUME_SET_ROBUST_LIST + #endif +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0006-Fix-atomic_fetch_xor_release.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0006-Fix-atomic_fetch_xor_release.patch new file mode 100644 index 000000000..7616efa18 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0006-Fix-atomic_fetch_xor_release.patch @@ -0,0 +1,81 @@ +From b671f20cc160238b62894d032a55baf85867106e Mon Sep 17 00:00:00 2001 +From: Catalin Enache <catalin.enache@windriver.com> +Date: Fri, 30 Jun 2017 19:12:43 +0300 +Subject: [PATCH 6/6] Fix atomic_fetch_xor_release. + +No code uses atomic_fetch_xor_release except for the upcoming +conditional variable rewrite. Therefore there is no user +visible bug here. The use of atomic_compare_and_exchange_bool_rel +is removed (since it doesn't exist anymore), and is replaced +by atomic_compare_exchange_weak_release. + +We use weak_release because it provides better performance in +the loop (the weak semantic) and because the xor is release MO +(the release semantic). We don't reload expected in the loop +because atomic_compare_and_exchange_weak_release does this for +us as part of the CAS failure. + +It is otherwise a fairly plain conversion that fixes building +the new condvar for 32-bit x86. Passes all regression tests +for x86. + +Upstream-Status: Backport + +Author: Carlos O'Donell <carlos@systemhalted.org> +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + ChangeLog | 6 ++++++ + include/atomic.h | 19 +++++++++++-------- + 2 files changed, 17 insertions(+), 8 deletions(-) + +diff --git a/ChangeLog b/ChangeLog +index 44c518b..893262d 100644 +--- a/ChangeLog ++++ b/ChangeLog +@@ -1,3 +1,9 @@ ++2016-10-26 Carlos O'Donell <carlos@redhat.com> ++ ++ * include/atomic.h ++ [USE_COMPILER_ATOMIC_BUILTINS && !atomic_fetch_xor_release] ++ (atomic_fetch_xor_release): Use atomic_compare_exchange_weak_release. ++ + 2017-04-04 Adhemerval Zanella <adhemerval.zanella@linaro.org> + + * nptl/pthreadP.h (USE_REQUEUE_PI): Remove ununsed macro. 
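[Illustrative aside, not part of the patch above.] The commit message for the atomic_fetch_xor_release fix describes the retry loop in terms of glibc's internal atomic macros. For readers unfamiliar with those macros, the same pattern can be sketched with standard C11 atomics; the function name and memory orders below mirror the description rather than glibc's actual macro expansion:

    #include <stdatomic.h>

    /* Sketch of a fetch-xor with release semantics.  On failure,
       atomic_compare_exchange_weak_explicit writes the value it observed
       back into 'expected', so the loop only recomputes the XOR before
       retrying.  */
    static unsigned int
    fetch_xor_release_sketch (atomic_uint *mem, unsigned int operand)
    {
      unsigned int expected = atomic_load_explicit (mem, memory_order_relaxed);
      unsigned int desired;

      do
        desired = expected ^ operand;
      while (!atomic_compare_exchange_weak_explicit (mem, &expected, desired,
                                                     memory_order_release,
                                                     memory_order_relaxed));

      /* Like the glibc macro, return the value seen before the XOR.  */
      return expected;
    }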
+diff --git a/include/atomic.h b/include/atomic.h +index 5a8e7e7..c8b4664 100644 +--- a/include/atomic.h ++++ b/include/atomic.h +@@ -777,18 +777,21 @@ void __atomic_link_error (void); + # endif + + # ifndef atomic_fetch_xor_release ++/* Failing the atomic_compare_exchange_weak_release reloads the value in ++ __atg104_expected, so we need only do the XOR again and retry. */ + # define atomic_fetch_xor_release(mem, operand) \ +- ({ __typeof (*(mem)) __atg104_old; \ +- __typeof (mem) __atg104_memp = (mem); \ ++ ({ __typeof (mem) __atg104_memp = (mem); \ ++ __typeof (*(mem)) __atg104_expected = (*__atg104_memp); \ ++ __typeof (*(mem)) __atg104_desired; \ + __typeof (*(mem)) __atg104_op = (operand); \ + \ + do \ +- __atg104_old = (*__atg104_memp); \ +- while (__builtin_expect \ +- (atomic_compare_and_exchange_bool_rel ( \ +- __atg104_memp, __atg104_old ^ __atg104_op, __atg104_old), 0));\ +- \ +- __atg104_old; }) ++ __atg104_desired = __atg104_expected ^ __atg104_op; \ ++ while (__glibc_unlikely \ ++ (atomic_compare_exchange_weak_release ( \ ++ __atg104_memp, &__atg104_expected, __atg104_desired) \ ++ == 0)); \ ++ __atg104_expected; }) + #endif + + #endif /* !USE_ATOMIC_COMPILER_BUILTINS */ +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0028-Bug-20116-Fix-use-after-free-in-pthread_create.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0028-Bug-20116-Fix-use-after-free-in-pthread_create.patch new file mode 100644 index 000000000..66f1fcd0f --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/0028-Bug-20116-Fix-use-after-free-in-pthread_create.patch @@ -0,0 +1,668 @@ +From e7ba24f05d86acf7072e066ea6d7b235a106688c Mon Sep 17 00:00:00 2001 +From: Carlos O'Donell <carlos@redhat.com> +Date: Sat, 28 Jan 2017 19:13:34 -0500 +Subject: [PATCH] Bug 20116: Fix use after free in pthread_create() + +The commit documents the ownership rules around 'struct pthread' and +when a thread can read or write to the descriptor. With those ownership +rules in place it becomes obvious that pd->stopped_start should not be +touched in several of the paths during thread startup, particularly so +for detached threads. In the case of detached threads, between the time +the thread is created by the OS kernel and the creating thread checks +pd->stopped_start, the detached thread might have already exited and the +memory for pd unmapped. As a regression test we add a simple test which +exercises this exact case by quickly creating detached threads with +large enough stacks to ensure the thread stack cache is bypassed and the +stacks are unmapped. Before the fix the testcase segfaults, after the +fix it works correctly and completes without issue. 
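[Illustrative aside, not part of the patch.] The regression test mentioned in the commit message (nptl/tst-create-detached.c) is added by the patch itself; the stand-alone sketch below only shows the general shape of such a test. The stack size, iteration count, and retry policy are arbitrary choices for illustration, not the values used by the real test:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *
    thread_func (void *arg)
    {
      /* The thread exits immediately; a detached thread with a large stack
         may have its descriptor and stack unmapped right away.  */
      return NULL;
    }

    int
    main (void)
    {
      pthread_attr_t attr;
      pthread_t th;

      if (pthread_attr_init (&attr) != 0
          || pthread_attr_setstacksize (&attr, 16 * 1024 * 1024) != 0
          || pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED) != 0)
        {
          fprintf (stderr, "failed to set up thread attributes\n");
          return 1;
        }

      for (int i = 0; i < 10000; ++i)
        {
          int err = pthread_create (&th, &attr, thread_func, NULL);
          if (err == EAGAIN)
            {
              /* Transient resource exhaustion: let earlier threads exit.  */
              usleep (1000);
              continue;
            }
          if (err != 0)
            {
              fprintf (stderr, "pthread_create failed: %d\n", err);
              return 1;
            }
        }

      /* Before the fix, pthread_create could crash reading the descriptor
         of a detached thread that had already exited and been unmapped.  */
      return 0;
    }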
+ +For a detailed discussion see: +https://www.sourceware.org/ml/libc-alpha/2017-01/msg00505.html + +(cherry-picked from commit f8bf15febcaf137bbec5a61101e88cd5a9d56ca8) + +Upstream-Status: Backport [master] +Signed-off-by: Yuanjie Huang <yuanjie.huang@windriver.com> +--- + ChangeLog | 23 ++++ + nptl/Makefile | 3 +- + nptl/createthread.c | 10 +- + nptl/pthread_create.c | 207 +++++++++++++++++++++++++++------ + nptl/pthread_getschedparam.c | 1 + + nptl/pthread_setschedparam.c | 1 + + nptl/pthread_setschedprio.c | 1 + + nptl/tpp.c | 2 + + nptl/tst-create-detached.c | 137 ++++++++++++++++++++++ + sysdeps/nacl/createthread.c | 10 +- + sysdeps/unix/sysv/linux/createthread.c | 16 ++- + 11 files changed, 356 insertions(+), 55 deletions(-) + create mode 100644 nptl/tst-create-detached.c + +diff --git a/ChangeLog b/ChangeLog +index 84ae7a7af8..0fbda9020e 100644 +--- a/ChangeLog ++++ b/ChangeLog +@@ -1,3 +1,26 @@ ++2016-01-28 Carlos O'Donell <carlos@redhat.com> ++ Alexey Makhalov <amakhalov@vmware.com> ++ Florian Weimer <fweimer@redhat.com> ++ ++ [BZ #20116] ++ * nptl/pthread_create.c: Document concurrency notes. ++ Enhance thread creation notes. ++ (create_thread): Use bool *stopped_start. ++ (START_THREAD_DEFN): Comment ownership of PD. ++ (__pthread_create_2_1): Add local bool stopped_start and use ++ that instead of pd->stopped_start where appropriate. ++ * nptl/createthread.c (create_thread): Use bool *stopped_start. ++ * sysdeps/nacl/createthread.c (create_thread): Use bool *stopped_start. ++ * sysdeps/unix/sysv/linux/createthread.c (create_thread): Likewise. ++ * nptl/tst-create-detached.c: New file. ++ * nptl/Makefile (tests): Add tst-create-detached. ++ * nptl/pthread_getschedparam.c (__pthread_getschedparam): ++ Reference the enhanced thread creation notes. ++ * nptl/pthread_setschedparam.c (__pthread_setschedparam): Likewise. ++ * nptl/pthread_setschedprio.c (pthread_setschedprio): Likewise. ++ * nptl/tpp.c (__pthread_tpp_change_priority): Likewise. ++ (__pthread_current_priority): Likewise. ++ + 2016-08-04 Carlos O'Donell <carlos@redhat.com> + + * po/de.po: Update from Translation Project. +diff --git a/nptl/Makefile b/nptl/Makefile +index 0d8aadebed..7dec4edb53 100644 +--- a/nptl/Makefile ++++ b/nptl/Makefile +@@ -290,7 +290,8 @@ tests = tst-typesizes \ + tst-initializers1 $(addprefix tst-initializers1-,\ + c89 gnu89 c99 gnu99 c11 gnu11) \ + tst-bad-schedattr \ +- tst-thread_local1 tst-mutex-errorcheck tst-robust10 ++ tst-thread_local1 tst-mutex-errorcheck tst-robust10 \ ++ tst-create-detached \ + xtests = tst-setuid1 tst-setuid1-static tst-setuid2 \ + tst-mutexpp1 tst-mutexpp6 tst-mutexpp10 + test-srcs = tst-oddstacklimit +diff --git a/nptl/createthread.c b/nptl/createthread.c +index ba2f9c7167..328f85865d 100644 +--- a/nptl/createthread.c ++++ b/nptl/createthread.c +@@ -25,16 +25,14 @@ + + static int + create_thread (struct pthread *pd, const struct pthread_attr *attr, +- bool stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran) ++ bool *stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran) + { + /* If the implementation needs to do some tweaks to the thread after + it has been created at the OS level, it can set STOPPED_START here. */ + +- pd->stopped_start = stopped_start; +- if (__glibc_unlikely (stopped_start)) +- /* We make sure the thread does not run far by forcing it to get a +- lock. We lock it here too so that the new thread cannot continue +- until we tell it to. 
*/ ++ pd->stopped_start = *stopped_start; ++ if (__glibc_unlikely (*stopped_start)) ++ /* See CONCURRENCY NOTES in nptl/pthread_create.c. */ + lll_lock (pd->lock, LLL_PRIVATE); + + return ENOSYS; +diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c +index a834063ad5..44b17bec86 100644 +--- a/nptl/pthread_create.c ++++ b/nptl/pthread_create.c +@@ -54,25 +54,141 @@ unsigned int __nptl_nthreads = 1; + /* Code to allocate and deallocate a stack. */ + #include "allocatestack.c" + +-/* createthread.c defines this function, and two macros: ++/* CONCURRENCY NOTES: ++ ++ Understanding who is the owner of the 'struct pthread' or 'PD' ++ (refers to the value of the 'struct pthread *pd' function argument) ++ is critically important in determining exactly which operations are ++ allowed and which are not and when, particularly when it comes to the ++ implementation of pthread_create, pthread_join, pthread_detach, and ++ other functions which all operate on PD. ++ ++ The owner of PD is responsible for freeing the final resources ++ associated with PD, and may examine the memory underlying PD at any ++ point in time until it frees it back to the OS or to reuse by the ++ runtime. ++ ++ The thread which calls pthread_create is called the creating thread. ++ The creating thread begins as the owner of PD. ++ ++ During startup the new thread may examine PD in coordination with the ++ owner thread (which may be itself). ++ ++ The four cases of ownership transfer are: ++ ++ (1) Ownership of PD is released to the process (all threads may use it) ++ after the new thread starts in a joinable state ++ i.e. pthread_create returns a usable pthread_t. ++ ++ (2) Ownership of PD is released to the new thread starting in a detached ++ state. ++ ++ (3) Ownership of PD is dynamically released to a running thread via ++ pthread_detach. ++ ++ (4) Ownership of PD is acquired by the thread which calls pthread_join. ++ ++ Implementation notes: ++ ++ The PD->stopped_start and thread_ran variables are used to determine ++ exactly which of the four ownership states we are in and therefore ++ what actions can be taken. For example after (2) we cannot read or ++ write from PD anymore since the thread may no longer exist and the ++ memory may be unmapped. The most complicated cases happen during ++ thread startup: ++ ++ (a) If the created thread is in a detached (PTHREAD_CREATE_DETACHED), ++ or joinable (default PTHREAD_CREATE_JOINABLE) state and ++ STOPPED_START is true, then the creating thread has ownership of ++ PD until the PD->lock is released by pthread_create. If any ++ errors occur we are in states (c), (d), or (e) below. ++ ++ (b) If the created thread is in a detached state ++ (PTHREAD_CREATED_DETACHED), and STOPPED_START is false, then the ++ creating thread has ownership of PD until it invokes the OS ++ kernel's thread creation routine. If this routine returns ++ without error, then the created thread owns PD; otherwise, see ++ (c) and (e) below. ++ ++ (c) If the detached thread setup failed and THREAD_RAN is true, then ++ the creating thread releases ownership to the new thread by ++ sending a cancellation signal. All threads set THREAD_RAN to ++ true as quickly as possible after returning from the OS kernel's ++ thread creation routine. ++ ++ (d) If the joinable thread setup failed and THREAD_RAN is true, then ++ then the creating thread retains ownership of PD and must cleanup ++ state. 
Ownership cannot be released to the process via the ++ return of pthread_create since a non-zero result entails PD is ++ undefined and therefore cannot be joined to free the resources. ++ We privately call pthread_join on the thread to finish handling ++ the resource shutdown (Or at least we should, see bug 19511). ++ ++ (e) If the thread creation failed and THREAD_RAN is false, then the ++ creating thread retains ownership of PD and must cleanup state. ++ No waiting for the new thread is required because it never ++ started. ++ ++ The nptl_db interface: ++ ++ The interface with nptl_db requires that we enqueue PD into a linked ++ list and then call a function which the debugger will trap. The PD ++ will then be dequeued and control returned to the thread. The caller ++ at the time must have ownership of PD and such ownership remains ++ after control returns to thread. The enqueued PD is removed from the ++ linked list by the nptl_db callback td_thr_event_getmsg. The debugger ++ must ensure that the thread does not resume execution, otherwise ++ ownership of PD may be lost and examining PD will not be possible. ++ ++ Note that the GNU Debugger as of (December 10th 2015) commit ++ c2c2a31fdb228d41ce3db62b268efea04bd39c18 no longer uses ++ td_thr_event_getmsg and several other related nptl_db interfaces. The ++ principal reason for this is that nptl_db does not support non-stop ++ mode where other threads can run concurrently and modify runtime ++ structures currently in use by the debugger and the nptl_db ++ interface. ++ ++ Axioms: ++ ++ * The create_thread function can never set stopped_start to false. ++ * The created thread can read stopped_start but never write to it. ++ * The variable thread_ran is set some time after the OS thread ++ creation routine returns, how much time after the thread is created ++ is unspecified, but it should be as quickly as possible. ++ ++*/ ++ ++/* CREATE THREAD NOTES: ++ ++ createthread.c defines the create_thread function, and two macros: + START_THREAD_DEFN and START_THREAD_SELF (see below). + +- create_thread is obliged to initialize PD->stopped_start. It +- should be true if the STOPPED_START parameter is true, or if +- create_thread needs the new thread to synchronize at startup for +- some other implementation reason. If PD->stopped_start will be +- true, then create_thread is obliged to perform the operation +- "lll_lock (PD->lock, LLL_PRIVATE)" before starting the thread. ++ create_thread must initialize PD->stopped_start. It should be true ++ if the STOPPED_START parameter is true, or if create_thread needs the ++ new thread to synchronize at startup for some other implementation ++ reason. If STOPPED_START will be true, then create_thread is obliged ++ to lock PD->lock before starting the thread. Then pthread_create ++ unlocks PD->lock which synchronizes-with START_THREAD_DEFN in the ++ child thread which does an acquire/release of PD->lock as the last ++ action before calling the user entry point. The goal of all of this ++ is to ensure that the required initial thread attributes are applied ++ (by the creating thread) before the new thread runs user code. Note ++ that the the functions pthread_getschedparam, pthread_setschedparam, ++ pthread_setschedprio, __pthread_tpp_change_priority, and ++ __pthread_current_priority reuse the same lock, PD->lock, for a ++ similar purpose e.g. synchronizing the setting of similar thread ++ attributes. 
These functions are never called before the thread is ++ created, so don't participate in startup syncronization, but given ++ that the lock is present already and in the unlocked state, reusing ++ it saves space. + + The return value is zero for success or an errno code for failure. + If the return value is ENOMEM, that will be translated to EAGAIN, + so create_thread need not do that. On failure, *THREAD_RAN should + be set to true iff the thread actually started up and then got +- cancelled before calling user code (*PD->start_routine), in which +- case it is responsible for doing its own cleanup. */ +- ++ canceled before calling user code (*PD->start_routine). */ + static int create_thread (struct pthread *pd, const struct pthread_attr *attr, +- bool stopped_start, STACK_VARIABLES_PARMS, ++ bool *stopped_start, STACK_VARIABLES_PARMS, + bool *thread_ran); + + #include <createthread.c> +@@ -314,12 +430,19 @@ START_THREAD_DEFN + /* Store the new cleanup handler info. */ + THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf); + ++ /* We are either in (a) or (b), and in either case we either own ++ PD already (2) or are about to own PD (1), and so our only ++ restriction would be that we can't free PD until we know we ++ have ownership (see CONCURRENCY NOTES above). */ + if (__glibc_unlikely (pd->stopped_start)) + { + int oldtype = CANCEL_ASYNC (); + + /* Get the lock the parent locked to force synchronization. */ + lll_lock (pd->lock, LLL_PRIVATE); ++ ++ /* We have ownership of PD now. */ ++ + /* And give it up right away. */ + lll_unlock (pd->lock, LLL_PRIVATE); + +@@ -378,7 +501,8 @@ START_THREAD_DEFN + pd, pd->nextevent)); + } + +- /* Now call the function to signal the event. */ ++ /* Now call the function which signals the event. See ++ CONCURRENCY NOTES for the nptl_db interface comments. */ + __nptl_death_event (); + } + } +@@ -642,19 +766,28 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr, + that cares whether the thread count is correct. */ + atomic_increment (&__nptl_nthreads); + +- bool thread_ran = false; ++ /* Our local value of stopped_start and thread_ran can be accessed at ++ any time. The PD->stopped_start may only be accessed if we have ++ ownership of PD (see CONCURRENCY NOTES above). */ ++ bool stopped_start = false; bool thread_ran = false; + + /* Start the thread. */ + if (__glibc_unlikely (report_thread_creation (pd))) + { +- /* Create the thread. We always create the thread stopped +- so that it does not get far before we tell the debugger. */ +- retval = create_thread (pd, iattr, true, STACK_VARIABLES_ARGS, +- &thread_ran); ++ stopped_start = true; ++ ++ /* We always create the thread stopped at startup so we can ++ notify the debugger. */ ++ retval = create_thread (pd, iattr, &stopped_start, ++ STACK_VARIABLES_ARGS, &thread_ran); + if (retval == 0) + { +- /* create_thread should have set this so that the logic below can +- test it. */ ++ /* We retain ownership of PD until (a) (see CONCURRENCY NOTES ++ above). */ ++ ++ /* Assert stopped_start is true in both our local copy and the ++ PD copy. */ ++ assert (stopped_start); + assert (pd->stopped_start); + + /* Now fill in the information about the new thread in +@@ -671,26 +804,30 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr, + pd, pd->nextevent) + != 0); + +- /* Now call the function which signals the event. */ ++ /* Now call the function which signals the event. See ++ CONCURRENCY NOTES for the nptl_db interface comments. 
*/ + __nptl_create_event (); + } + } + else +- retval = create_thread (pd, iattr, false, STACK_VARIABLES_ARGS, +- &thread_ran); ++ retval = create_thread (pd, iattr, &stopped_start, ++ STACK_VARIABLES_ARGS, &thread_ran); + + if (__glibc_unlikely (retval != 0)) + { +- /* If thread creation "failed", that might mean that the thread got +- created and ran a little--short of running user code--but then +- create_thread cancelled it. In that case, the thread will do all +- its own cleanup just like a normal thread exit after a successful +- creation would do. */ +- + if (thread_ran) +- assert (pd->stopped_start); ++ /* State (c) or (d) and we may not have PD ownership (see ++ CONCURRENCY NOTES above). We can assert that STOPPED_START ++ must have been true because thread creation didn't fail, but ++ thread attribute setting did. */ ++ /* See bug 19511 which explains why doing nothing here is a ++ resource leak for a joinable thread. */ ++ assert (stopped_start); + else + { ++ /* State (e) and we have ownership of PD (see CONCURRENCY ++ NOTES above). */ ++ + /* Oops, we lied for a second. */ + atomic_decrement (&__nptl_nthreads); + +@@ -710,10 +847,14 @@ __pthread_create_2_1 (pthread_t *newthread, const pthread_attr_t *attr, + } + else + { +- if (pd->stopped_start) +- /* The thread blocked on this lock either because we're doing TD_CREATE +- event reporting, or for some other reason that create_thread chose. +- Now let it run free. */ ++ /* We don't know if we have PD ownership. Once we check the local ++ stopped_start we'll know if we're in state (a) or (b) (see ++ CONCURRENCY NOTES above). */ ++ if (stopped_start) ++ /* State (a), we own PD. The thread blocked on this lock either ++ because we're doing TD_CREATE event reporting, or for some ++ other reason that create_thread chose. Now let it run ++ free. */ + lll_unlock (pd->lock, LLL_PRIVATE); + + /* We now have for sure more than one thread. The main thread might +diff --git a/nptl/pthread_getschedparam.c b/nptl/pthread_getschedparam.c +index b887881baf..de71171a08 100644 +--- a/nptl/pthread_getschedparam.c ++++ b/nptl/pthread_getschedparam.c +@@ -35,6 +35,7 @@ __pthread_getschedparam (pthread_t threadid, int *policy, + + int result = 0; + ++ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ + lll_lock (pd->lock, LLL_PRIVATE); + + /* The library is responsible for maintaining the values at all +diff --git a/nptl/pthread_setschedparam.c b/nptl/pthread_setschedparam.c +index dfb52b9dbf..dcb520f1c8 100644 +--- a/nptl/pthread_setschedparam.c ++++ b/nptl/pthread_setschedparam.c +@@ -36,6 +36,7 @@ __pthread_setschedparam (pthread_t threadid, int policy, + + int result = 0; + ++ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ + lll_lock (pd->lock, LLL_PRIVATE); + + struct sched_param p; +diff --git a/nptl/pthread_setschedprio.c b/nptl/pthread_setschedprio.c +index cefc6481d6..8134b50560 100644 +--- a/nptl/pthread_setschedprio.c ++++ b/nptl/pthread_setschedprio.c +@@ -38,6 +38,7 @@ pthread_setschedprio (pthread_t threadid, int prio) + struct sched_param param; + param.sched_priority = prio; + ++ /* See CREATE THREAD NOTES in nptl/pthread_create.c. 
*/ + lll_lock (pd->lock, LLL_PRIVATE); + + /* If the thread should have higher priority because of some +diff --git a/nptl/tpp.c b/nptl/tpp.c +index e175bf4d53..223bd6bbee 100644 +--- a/nptl/tpp.c ++++ b/nptl/tpp.c +@@ -114,6 +114,7 @@ __pthread_tpp_change_priority (int previous_prio, int new_prio) + if (priomax == newpriomax) + return 0; + ++ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ + lll_lock (self->lock, LLL_PRIVATE); + + tpp->priomax = newpriomax; +@@ -165,6 +166,7 @@ __pthread_current_priority (void) + + int result = 0; + ++ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ + lll_lock (self->lock, LLL_PRIVATE); + + if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) +diff --git a/nptl/tst-create-detached.c b/nptl/tst-create-detached.c +new file mode 100644 +index 0000000000..ea93e441c7 +--- /dev/null ++++ b/nptl/tst-create-detached.c +@@ -0,0 +1,137 @@ ++/* Bug 20116: Test rapid creation of detached threads. ++ Copyright (C) 2017 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. ++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; see the file COPYING.LIB. If ++ not, see <http://www.gnu.org/licenses/>. */ ++ ++/* The goal of the test is to trigger a failure if the parent touches ++ any part of the thread descriptor after the detached thread has ++ exited. We test this by creating many detached threads with large ++ stacks. The stacks quickly fill the the stack cache and subsequent ++ threads will start to cause the thread stacks to be immediately ++ unmapped to satisfy the stack cache max. With the stacks being ++ unmapped the parent's read of any part of the thread descriptor will ++ trigger a segfault. That segfault is what we are trying to cause, ++ since any segfault is a defect in the implementation. */ ++ ++#include <pthread.h> ++#include <stdio.h> ++#include <errno.h> ++#include <unistd.h> ++#include <stdbool.h> ++#include <sys/resource.h> ++#include <support/xthread.h> ++ ++/* Number of threads to create. */ ++enum { threads_to_create = 100000 }; ++ ++/* Number of threads which should spawn other threads. */ ++enum { creator_threads = 2 }; ++ ++/* Counter of threads created so far. This is incremented by all the ++ running creator threads. */ ++static unsigned threads_created; ++ ++/* Thread callback which does nothing, so that the thread exits ++ immediatedly. */ ++static void * ++do_nothing (void *arg) ++{ ++ return NULL; ++} ++ ++/* Attribute indicating that the thread should be created in a detached ++ fashion. */ ++static pthread_attr_t detached; ++ ++/* Barrier to synchronize initialization. */ ++static pthread_barrier_t barrier; ++ ++static void * ++creator_thread (void *arg) ++{ ++ int ret; ++ xpthread_barrier_wait (&barrier); ++ ++ while (true) ++ { ++ pthread_t thr; ++ /* Thread creation will fail if the kernel does not free old ++ threads quickly enough, so we do not report errors. 
*/ ++ ret = pthread_create (&thr, &detached, do_nothing, NULL); ++ if (ret == 0 && __atomic_add_fetch (&threads_created, 1, __ATOMIC_SEQ_CST) ++ >= threads_to_create) ++ break; ++ } ++ ++ return NULL; ++} ++ ++static int ++do_test (void) ++{ ++ /* Limit the size of the process, so that memory allocation will ++ fail without impacting the entire system. */ ++ { ++ struct rlimit limit; ++ if (getrlimit (RLIMIT_AS, &limit) != 0) ++ { ++ printf ("FAIL: getrlimit (RLIMIT_AS) failed: %m\n"); ++ return 1; ++ } ++ /* This limit, 800MB, is just a heuristic. Any value can be ++ picked. */ ++ long target = 800 * 1024 * 1024; ++ if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > target) ++ { ++ limit.rlim_cur = target; ++ if (setrlimit (RLIMIT_AS, &limit) != 0) ++ { ++ printf ("FAIL: setrlimit (RLIMIT_AS) failed: %m\n"); ++ return 1; ++ } ++ } ++ } ++ ++ xpthread_attr_init (&detached); ++ ++ xpthread_attr_setdetachstate (&detached, PTHREAD_CREATE_DETACHED); ++ ++ /* A large thread stack seems beneficial for reproducing a race ++ condition in detached thread creation. The goal is to reach the ++ limit of the runtime thread stack cache such that the detached ++ thread's stack is unmapped after exit and causes a segfault when ++ the parent reads the thread descriptor data stored on the the ++ unmapped stack. */ ++ xpthread_attr_setstacksize (&detached, 16 * 1024 * 1024); ++ ++ xpthread_barrier_init (&barrier, NULL, creator_threads); ++ ++ pthread_t threads[creator_threads]; ++ ++ for (int i = 0; i < creator_threads; ++i) ++ threads[i] = xpthread_create (NULL, creator_thread, NULL); ++ ++ for (int i = 0; i < creator_threads; ++i) ++ xpthread_join (threads[i]); ++ ++ xpthread_attr_destroy (&detached); ++ ++ xpthread_barrier_destroy (&barrier); ++ ++ return 0; ++} ++ ++#include <support/test-driver.c> +diff --git a/sysdeps/nacl/createthread.c b/sysdeps/nacl/createthread.c +index 7b571c34e2..5465558cc1 100644 +--- a/sysdeps/nacl/createthread.c ++++ b/sysdeps/nacl/createthread.c +@@ -32,15 +32,13 @@ static void start_thread (void) __attribute__ ((noreturn)); + + static int + create_thread (struct pthread *pd, const struct pthread_attr *attr, +- bool stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran) ++ bool *stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran) + { + pd->tid = __nacl_get_tid (pd); + +- pd->stopped_start = stopped_start; +- if (__glibc_unlikely (stopped_start)) +- /* We make sure the thread does not run far by forcing it to get a +- lock. We lock it here too so that the new thread cannot continue +- until we tell it to. */ ++ pd->stopped_start = *stopped_start; ++ if (__glibc_unlikely (*stopped_start)) ++ /* See CONCURRENCY NOTES in nptl/pthread_create.c. 
*/ + lll_lock (pd->lock, LLL_PRIVATE); + + TLS_DEFINE_INIT_TP (tp, pd); +diff --git a/sysdeps/unix/sysv/linux/createthread.c b/sysdeps/unix/sysv/linux/createthread.c +index 6d32cece48..66ddae61d4 100644 +--- a/sysdeps/unix/sysv/linux/createthread.c ++++ b/sysdeps/unix/sysv/linux/createthread.c +@@ -46,7 +46,7 @@ static int start_thread (void *arg) __attribute__ ((noreturn)); + + static int + create_thread (struct pthread *pd, const struct pthread_attr *attr, +- bool stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran) ++ bool *stopped_start, STACK_VARIABLES_PARMS, bool *thread_ran) + { + /* Determine whether the newly created threads has to be started + stopped since we have to set the scheduling parameters or set the +@@ -54,13 +54,11 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr, + if (attr != NULL + && (__glibc_unlikely (attr->cpuset != NULL) + || __glibc_unlikely ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0))) +- stopped_start = true; ++ *stopped_start = true; + +- pd->stopped_start = stopped_start; +- if (__glibc_unlikely (stopped_start)) +- /* We make sure the thread does not run far by forcing it to get a +- lock. We lock it here too so that the new thread cannot continue +- until we tell it to. */ ++ pd->stopped_start = *stopped_start; ++ if (__glibc_unlikely (*stopped_start)) ++ /* See CONCURRENCY NOTES in nptl/pthread_creat.c. */ + lll_lock (pd->lock, LLL_PRIVATE); + + /* We rely heavily on various flags the CLONE function understands: +@@ -117,7 +115,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr, + /* Set the affinity mask if necessary. */ + if (attr->cpuset != NULL) + { +- assert (stopped_start); ++ assert (*stopped_start); + + res = INTERNAL_SYSCALL (sched_setaffinity, err, 3, pd->tid, + attr->cpusetsize, attr->cpuset); +@@ -140,7 +138,7 @@ create_thread (struct pthread *pd, const struct pthread_attr *attr, + /* Set the scheduling parameters. */ + if ((attr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0) + { +- assert (stopped_start); ++ assert (*stopped_start); + + res = INTERNAL_SYSCALL (sched_setscheduler, err, 3, pd->tid, + pd->schedpolicy, &pd->schedparam); +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2016-6323.patch b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2016-6323.patch new file mode 100644 index 000000000..f9b9fa50d --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc/CVE-2016-6323.patch @@ -0,0 +1,39 @@ +glibc-2.24: Fix CVE-2016-6323 + +[No upstream tracking] -- https://sourceware.org/bugzilla/show_bug.cgi?id=20435 + +arm: mark __startcontext as .cantunwind, GNU + +Glibc bug where the makecontext function would create +an execution context which is incompatible with the unwinder, +causing it to hang when the generation of a backtrace is attempted. + +Upstream-Status: Backport [https://sourceware.org/git/gitweb.cgi?p=glibc.git;h=9e2ff6c9cc54c0b4402b8d49e4abe7000fde7617] +CVE: CVE-2016-6323 +Signed-off-by: Andrej Valek <andrej.valek@siemens.com> +Signed-off-by: Pascal Bach <pascal.bach@siemens.com> + +diff --git a/sysdeps/unix/sysv/linux/arm/setcontext.S b/sysdeps/unix/sysv/linux/arm/setcontext.S +index 603e508..d1f168f 100644 +--- a/sysdeps/unix/sysv/linux/arm/setcontext.S ++++ b/sysdeps/unix/sysv/linux/arm/setcontext.S +@@ -86,12 +86,19 @@ weak_alias(__setcontext, setcontext) + + /* Called when a makecontext() context returns. Start the + context in R4 or fall through to exit(). 
*/ ++ /* Unwind descriptors are looked up based on PC - 2, so we have to ++ make sure to mark the instruction preceding the __startcontext ++ label as .cantunwind. */ ++ .fnstart ++ .cantunwind ++ nop + ENTRY(__startcontext) + movs r0, r4 + bne PLTJMP(__setcontext) + + @ New context was 0 - exit + b PLTJMP(HIDDEN_JUMPTARGET(exit)) ++ .fnend + END(__startcontext) + + #ifdef PIC diff --git a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc_2.24.bb b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc_2.24.bb index f5a21b258..e723e03dc 100644 --- a/import-layers/yocto-poky/meta/recipes-core/glibc/glibc_2.24.bb +++ b/import-layers/yocto-poky/meta/recipes-core/glibc/glibc_2.24.bb @@ -37,6 +37,14 @@ SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \ file://0024-eglibc-Forward-port-cross-locale-generation-support.patch \ file://0025-Define-DUMMY_LOCALE_T-if-not-defined.patch \ file://0026-build_local_scope.patch \ + file://0028-Bug-20116-Fix-use-after-free-in-pthread_create.patch \ + file://CVE-2016-6323.patch \ + file://0001-Add-atomic_exchange_relaxed.patch \ + file://0002-Add-atomic-operations-required-by-the-new-condition-.patch \ + file://0003-Add-pretty-printers-for-the-NPTL-lock-types.patch \ + file://0004-New-condvar-implementation-that-provides-stronger-or.patch \ + file://0005-Remove-__ASSUME_REQUEUE_PI.patch \ + file://0006-Fix-atomic_fetch_xor_release.patch \ " SRC_URI += "\ diff --git a/import-layers/yocto-poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/import-layers/yocto-poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb index e6ed0f482..0803ddb65 100644 --- a/import-layers/yocto-poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb +++ b/import-layers/yocto-poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb @@ -22,7 +22,7 @@ IMAGE_FSTYPES = "vmdk" inherit core-image module-base -SRCREV ?= "746c681be4c744d0c6c2d3225b94550241546f65" +SRCREV ?= "e92165f5cea1c345672dd866df6a44d1cd8b97ce" SRC_URI = "git://git.yoctoproject.org/poky;branch=morty \ file://Yocto_Build_Appliance.vmx \ file://Yocto_Build_Appliance.vmxf \ @@ -63,7 +63,7 @@ fakeroot do_populate_poky_src () { # Also save (for reference only) the actual SRCREV used to create this image echo "export BA_SRCREV=${SRCREV}" >> ${IMAGE_ROOTFS}/home/builder/.bashrc echo "" >> ${IMAGE_ROOTFS}/home/builder/.bashrc - echo "export PATH=$PATH:/sbin" >> ${IMAGE_ROOTFS}/home/builder/.bashrc + echo 'export PATH=$PATH:/sbin' >> ${IMAGE_ROOTFS}/home/builder/.bashrc echo "" >> ${IMAGE_ROOTFS}/home/builder/.bashrc echo "# If working behind a proxy and using the provided oe-git-proxy script" >> ${IMAGE_ROOTFS}/home/builder/.bashrc diff --git a/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2016-9318.patch b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2016-9318.patch new file mode 100644 index 000000000..3581ab83d --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/CVE-2016-9318.patch @@ -0,0 +1,207 @@ +From 7fa1cd31552d52d50a9101f07c816ff6dd2d9f19 Mon Sep 17 00:00:00 2001 +From: Doran Moppert <dmoppert@redhat.com> +Date: Fri, 7 Apr 2017 16:45:56 +0200 +Subject: [PATCH] Add an XML_PARSE_NOXXE flag to block all entities loading + even local + +For https://bugzilla.gnome.org/show_bug.cgi?id=772726 + +* include/libxml/parser.h: Add a new parser flag XML_PARSE_NOXXE +* elfgcchack.h, xmlIO.h, xmlIO.c: associated loading routine +* include/libxml/xmlerror.h: new error raised +* xmllint.c: adds --noxxe flag to activate the option + 
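(Usage sketch only, not part of the patch: with this backport applied, an application selects the new behaviour through the ordinary parser-options argument. XML_PARSE_NONET, xmlReadFile and xmlFreeDoc are standard libxml2 API; XML_PARSE_NOXXE exists only in trees carrying this patch, and the function name parse_untrusted is invented.)

/* Parse a possibly hostile document with network access refused and,
 * where the patched libxml2 is in use, all external entity loading
 * forbidden as well. */
#include <libxml/parser.h>
#include <stdio.h>

int parse_untrusted(const char *path)
{
    int options = XML_PARSE_NONET    /* refuse to fetch DTDs/entities over the network */
                | XML_PARSE_NOXXE;   /* patch-only flag: forbid any external entity loading */

    xmlDocPtr doc = xmlReadFile(path, NULL, options);
    if (doc == NULL) {
        fprintf(stderr, "could not parse %s\n", path);
        return -1;
    }

    xmlFreeDoc(doc);
    return 0;
}

(This mirrors what the patched xmllint does for --noxxe below: it ORs XML_PARSE_NOXXE into the parser options and installs xmlNoXxeExternalEntityLoader.)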
+Upstream-Status: Backport +CVE: CVE-2016-9318 + +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + elfgcchack.h | 10 ++++++++++ + include/libxml/parser.h | 3 ++- + include/libxml/xmlIO.h | 8 ++++++++ + include/libxml/xmlerror.h | 1 + + parser.c | 4 ++++ + xmlIO.c | 40 +++++++++++++++++++++++++++++++++++----- + xmllint.c | 5 +++++ + 7 files changed, 65 insertions(+), 6 deletions(-) + +diff --git a/elfgcchack.h b/elfgcchack.h +index 8c52884..1b81dcd 100644 +--- a/elfgcchack.h ++++ b/elfgcchack.h +@@ -6547,6 +6547,16 @@ extern __typeof (xmlNoNetExternalEntityLoader) xmlNoNetExternalEntityLoader__int + #endif + #endif + ++#ifdef bottom_xmlIO ++#undef xmlNoXxeExternalEntityLoader ++extern __typeof (xmlNoXxeExternalEntityLoader) xmlNoXxeExternalEntityLoader __attribute((alias("xmlNoXxeExternalEntityLoader__internal_alias"))); ++#else ++#ifndef xmlNoXxeExternalEntityLoader ++extern __typeof (xmlNoXxeExternalEntityLoader) xmlNoXxeExternalEntityLoader__internal_alias __attribute((visibility("hidden"))); ++#define xmlNoXxeExternalEntityLoader xmlNoXxeExternalEntityLoader__internal_alias ++#endif ++#endif ++ + #ifdef bottom_tree + #undef xmlNodeAddContent + extern __typeof (xmlNodeAddContent) xmlNodeAddContent __attribute((alias("xmlNodeAddContent__internal_alias"))); +diff --git a/include/libxml/parser.h b/include/libxml/parser.h +index 47fbec0..63ca1b9 100644 +--- a/include/libxml/parser.h ++++ b/include/libxml/parser.h +@@ -1111,7 +1111,8 @@ typedef enum { + XML_PARSE_HUGE = 1<<19,/* relax any hardcoded limit from the parser */ + XML_PARSE_OLDSAX = 1<<20,/* parse using SAX2 interface before 2.7.0 */ + XML_PARSE_IGNORE_ENC= 1<<21,/* ignore internal document encoding hint */ +- XML_PARSE_BIG_LINES = 1<<22 /* Store big lines numbers in text PSVI field */ ++ XML_PARSE_BIG_LINES = 1<<22,/* Store big lines numbers in text PSVI field */ ++ XML_PARSE_NOXXE = 1<<23 /* Forbid any external entity loading */ + } xmlParserOption; + + XMLPUBFUN void XMLCALL +diff --git a/include/libxml/xmlIO.h b/include/libxml/xmlIO.h +index 3e41744..8d3fdef 100644 +--- a/include/libxml/xmlIO.h ++++ b/include/libxml/xmlIO.h +@@ -300,6 +300,14 @@ XMLPUBFUN xmlParserInputPtr XMLCALL + xmlParserCtxtPtr ctxt); + + /* ++ * A predefined entity loader external entity expansion ++ */ ++XMLPUBFUN xmlParserInputPtr XMLCALL ++ xmlNoXxeExternalEntityLoader (const char *URL, ++ const char *ID, ++ xmlParserCtxtPtr ctxt); ++ ++/* + * xmlNormalizeWindowsPath is obsolete, don't use it. + * Check xmlCanonicPath in uri.h for a better alternative. 
+ */ +diff --git a/include/libxml/xmlerror.h b/include/libxml/xmlerror.h +index 037c16d..3036062 100644 +--- a/include/libxml/xmlerror.h ++++ b/include/libxml/xmlerror.h +@@ -470,6 +470,7 @@ typedef enum { + XML_IO_EADDRINUSE, /* 1554 */ + XML_IO_EALREADY, /* 1555 */ + XML_IO_EAFNOSUPPORT, /* 1556 */ ++ XML_IO_ILLEGAL_XXE, /* 1557 */ + XML_XINCLUDE_RECURSION=1600, + XML_XINCLUDE_PARSE_VALUE, /* 1601 */ + XML_XINCLUDE_ENTITY_DEF_MISMATCH, /* 1602 */ +diff --git a/parser.c b/parser.c +index 53a6b7f..609a270 100644 +--- a/parser.c ++++ b/parser.c +@@ -15350,6 +15350,10 @@ xmlCtxtUseOptionsInternal(xmlParserCtxtPtr ctxt, int options, const char *encodi + ctxt->options |= XML_PARSE_NONET; + options -= XML_PARSE_NONET; + } ++ if (options & XML_PARSE_NOXXE) { ++ ctxt->options |= XML_PARSE_NOXXE; ++ options -= XML_PARSE_NOXXE; ++ } + if (options & XML_PARSE_COMPACT) { + ctxt->options |= XML_PARSE_COMPACT; + options -= XML_PARSE_COMPACT; +diff --git a/xmlIO.c b/xmlIO.c +index 1a79c09..304f822 100644 +--- a/xmlIO.c ++++ b/xmlIO.c +@@ -210,6 +210,7 @@ static const char *IOerr[] = { + "adddress in use", /* EADDRINUSE */ + "already in use", /* EALREADY */ + "unknown address familly", /* EAFNOSUPPORT */ ++ "Attempt to load external entity %s", /* XML_IO_ILLEGAL_XXE */ + }; + + #if defined(_WIN32) || defined (__DJGPP__) && !defined (__CYGWIN__) +@@ -4053,13 +4054,22 @@ xmlDefaultExternalEntityLoader(const char *URL, const char *ID, + xmlGenericError(xmlGenericErrorContext, + "xmlDefaultExternalEntityLoader(%s, xxx)\n", URL); + #endif +- if ((ctxt != NULL) && (ctxt->options & XML_PARSE_NONET)) { ++ if (ctxt != NULL) { + int options = ctxt->options; + +- ctxt->options -= XML_PARSE_NONET; +- ret = xmlNoNetExternalEntityLoader(URL, ID, ctxt); +- ctxt->options = options; +- return(ret); ++ if (options & XML_PARSE_NOXXE) { ++ ctxt->options -= XML_PARSE_NOXXE; ++ ret = xmlNoXxeExternalEntityLoader(URL, ID, ctxt); ++ ctxt->options = options; ++ return(ret); ++ } ++ ++ if (options & XML_PARSE_NONET) { ++ ctxt->options -= XML_PARSE_NONET; ++ ret = xmlNoNetExternalEntityLoader(URL, ID, ctxt); ++ ctxt->options = options; ++ return(ret); ++ } + } + #ifdef LIBXML_CATALOG_ENABLED + resource = xmlResolveResourceFromCatalog(URL, ID, ctxt); +@@ -4160,6 +4170,13 @@ xmlNoNetExternalEntityLoader(const char *URL, const char *ID, + xmlParserInputPtr input = NULL; + xmlChar *resource = NULL; + ++ if (ctxt == NULL) { ++ return(NULL); ++ } ++ if (ctxt->input_id == 1) { ++ return xmlDefaultExternalEntityLoader((const char *) URL, ID, ctxt); ++ } ++ + #ifdef LIBXML_CATALOG_ENABLED + resource = xmlResolveResourceFromCatalog(URL, ID, ctxt); + #endif +@@ -4182,5 +4199,18 @@ xmlNoNetExternalEntityLoader(const char *URL, const char *ID, + return(input); + } + ++xmlParserInputPtr ++xmlNoXxeExternalEntityLoader(const char *URL, const char *ID, ++ xmlParserCtxtPtr ctxt) { ++ if (ctxt == NULL) { ++ return(NULL); ++ } ++ if (ctxt->input_id == 1) { ++ return xmlDefaultExternalEntityLoader((const char *) URL, ID, ctxt); ++ } ++ xmlIOErr(XML_IO_ILLEGAL_XXE, (const char *) URL); ++ return(NULL); ++} ++ + #define bottom_xmlIO + #include "elfgcchack.h" +diff --git a/xmllint.c b/xmllint.c +index 67f7adb..d9368c1 100644 +--- a/xmllint.c ++++ b/xmllint.c +@@ -3019,6 +3019,7 @@ static void usage(const char *name) { + printf("\t--path 'paths': provide a set of paths for resources\n"); + printf("\t--load-trace : print trace of all external entities loaded\n"); + printf("\t--nonet : refuse to fetch DTDs or entities over network\n"); ++ printf("\t--noxxe 
: forbid any external entity loading\n"); + printf("\t--nocompact : do not generate compact text nodes\n"); + printf("\t--htmlout : output results as HTML\n"); + printf("\t--nowrap : do not put HTML doc wrapper\n"); +@@ -3461,6 +3462,10 @@ main(int argc, char **argv) { + (!strcmp(argv[i], "--nonet"))) { + options |= XML_PARSE_NONET; + xmlSetExternalEntityLoader(xmlNoNetExternalEntityLoader); ++ } else if ((!strcmp(argv[i], "-noxxe")) || ++ (!strcmp(argv[i], "--noxxe"))) { ++ options |= XML_PARSE_NOXXE; ++ xmlSetExternalEntityLoader(xmlNoXxeExternalEntityLoader); + } else if ((!strcmp(argv[i], "-nocompact")) || + (!strcmp(argv[i], "--nocompact"))) { + options &= ~XML_PARSE_COMPACT; +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-CVE-2016-4658.patch b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-CVE-2016-4658.patch new file mode 100644 index 000000000..5412e8c02 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-CVE-2016-4658.patch @@ -0,0 +1,269 @@ +libxml2-2.9.4: Fix CVE-2016-4658 + +[No upstream tracking] -- https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2016-4658 + +xpointer: Disallow namespace nodes in XPointer points and ranges + +Namespace nodes must be copied to avoid use-after-free errors. +But they don't necessarily have a physical representation in a +document, so simply disallow them in XPointer ranges. + +Upstream-Status: Backported + - [https://git.gnome.org/browse/libxml2/commit/?id=c1d1f7121194036608bf555f08d3062a36fd344b] + - [https://git.gnome.org/browse/libxml2/commit/?id=3f8a91036d338e51c059d54397a42d645f019c65] +CVE: CVE-2016-4658 +Signed-off-by: Andrej Valek <andrej.valek@siemens.com> +Signed-off-by: Pascal Bach <pascal.bach@siemens.com> + +diff --git a/xpointer.c b/xpointer.c +index 676c510..911680d 100644 +--- a/xpointer.c ++++ b/xpointer.c +@@ -320,6 +320,45 @@ xmlXPtrRangesEqual(xmlXPathObjectPtr range1, xmlXPathObjectPtr range2) { + } + + /** ++ * xmlXPtrNewRangeInternal: ++ * @start: the starting node ++ * @startindex: the start index ++ * @end: the ending point ++ * @endindex: the ending index ++ * ++ * Internal function to create a new xmlXPathObjectPtr of type range ++ * ++ * Returns the newly created object. ++ */ ++static xmlXPathObjectPtr ++xmlXPtrNewRangeInternal(xmlNodePtr start, int startindex, ++ xmlNodePtr end, int endindex) { ++ xmlXPathObjectPtr ret; ++ ++ /* ++ * Namespace nodes must be copied (see xmlXPathNodeSetDupNs). ++ * Disallow them for now. 
++ */ ++ if ((start != NULL) && (start->type == XML_NAMESPACE_DECL)) ++ return(NULL); ++ if ((end != NULL) && (end->type == XML_NAMESPACE_DECL)) ++ return(NULL); ++ ++ ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); ++ if (ret == NULL) { ++ xmlXPtrErrMemory("allocating range"); ++ return(NULL); ++ } ++ memset(ret, 0, sizeof(xmlXPathObject)); ++ ret->type = XPATH_RANGE; ++ ret->user = start; ++ ret->index = startindex; ++ ret->user2 = end; ++ ret->index2 = endindex; ++ return(ret); ++} ++ ++/** + * xmlXPtrNewRange: + * @start: the starting node + * @startindex: the start index +@@ -344,17 +383,7 @@ xmlXPtrNewRange(xmlNodePtr start, int startindex, + if (endindex < 0) + return(NULL); + +- ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); +- if (ret == NULL) { +- xmlXPtrErrMemory("allocating range"); +- return(NULL); +- } +- memset(ret, 0 , (size_t) sizeof(xmlXPathObject)); +- ret->type = XPATH_RANGE; +- ret->user = start; +- ret->index = startindex; +- ret->user2 = end; +- ret->index2 = endindex; ++ ret = xmlXPtrNewRangeInternal(start, startindex, end, endindex); + xmlXPtrRangeCheckOrder(ret); + return(ret); + } +@@ -381,17 +410,8 @@ xmlXPtrNewRangePoints(xmlXPathObjectPtr start, xmlXPathObjectPtr end) { + if (end->type != XPATH_POINT) + return(NULL); + +- ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); +- if (ret == NULL) { +- xmlXPtrErrMemory("allocating range"); +- return(NULL); +- } +- memset(ret, 0 , (size_t) sizeof(xmlXPathObject)); +- ret->type = XPATH_RANGE; +- ret->user = start->user; +- ret->index = start->index; +- ret->user2 = end->user; +- ret->index2 = end->index; ++ ret = xmlXPtrNewRangeInternal(start->user, start->index, end->user, ++ end->index); + xmlXPtrRangeCheckOrder(ret); + return(ret); + } +@@ -416,17 +436,7 @@ xmlXPtrNewRangePointNode(xmlXPathObjectPtr start, xmlNodePtr end) { + if (start->type != XPATH_POINT) + return(NULL); + +- ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); +- if (ret == NULL) { +- xmlXPtrErrMemory("allocating range"); +- return(NULL); +- } +- memset(ret, 0 , (size_t) sizeof(xmlXPathObject)); +- ret->type = XPATH_RANGE; +- ret->user = start->user; +- ret->index = start->index; +- ret->user2 = end; +- ret->index2 = -1; ++ ret = xmlXPtrNewRangeInternal(start->user, start->index, end, -1); + xmlXPtrRangeCheckOrder(ret); + return(ret); + } +@@ -453,17 +463,7 @@ xmlXPtrNewRangeNodePoint(xmlNodePtr start, xmlXPathObjectPtr end) { + if (end->type != XPATH_POINT) + return(NULL); + +- ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); +- if (ret == NULL) { +- xmlXPtrErrMemory("allocating range"); +- return(NULL); +- } +- memset(ret, 0 , (size_t) sizeof(xmlXPathObject)); +- ret->type = XPATH_RANGE; +- ret->user = start; +- ret->index = -1; +- ret->user2 = end->user; +- ret->index2 = end->index; ++ ret = xmlXPtrNewRangeInternal(start, -1, end->user, end->index); + xmlXPtrRangeCheckOrder(ret); + return(ret); + } +@@ -486,17 +486,7 @@ xmlXPtrNewRangeNodes(xmlNodePtr start, xmlNodePtr end) { + if (end == NULL) + return(NULL); + +- ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); +- if (ret == NULL) { +- xmlXPtrErrMemory("allocating range"); +- return(NULL); +- } +- memset(ret, 0 , (size_t) sizeof(xmlXPathObject)); +- ret->type = XPATH_RANGE; +- ret->user = start; +- ret->index = -1; +- ret->user2 = end; +- ret->index2 = -1; ++ ret = xmlXPtrNewRangeInternal(start, -1, end, -1); + xmlXPtrRangeCheckOrder(ret); + return(ret); + } +@@ -516,17 +506,7 @@ xmlXPtrNewCollapsedRange(xmlNodePtr start) 
{ + if (start == NULL) + return(NULL); + +- ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); +- if (ret == NULL) { +- xmlXPtrErrMemory("allocating range"); +- return(NULL); +- } +- memset(ret, 0 , (size_t) sizeof(xmlXPathObject)); +- ret->type = XPATH_RANGE; +- ret->user = start; +- ret->index = -1; +- ret->user2 = NULL; +- ret->index2 = -1; ++ ret = xmlXPtrNewRangeInternal(start, -1, NULL, -1); + return(ret); + } + +@@ -541,6 +521,8 @@ xmlXPtrNewCollapsedRange(xmlNodePtr start) { + */ + xmlXPathObjectPtr + xmlXPtrNewRangeNodeObject(xmlNodePtr start, xmlXPathObjectPtr end) { ++ xmlNodePtr endNode; ++ int endIndex; + xmlXPathObjectPtr ret; + + if (start == NULL) +@@ -549,7 +531,12 @@ xmlXPtrNewRangeNodeObject(xmlNodePtr start, xmlXPathObjectPtr end) { + return(NULL); + switch (end->type) { + case XPATH_POINT: ++ endNode = end->user; ++ endIndex = end->index; ++ break; + case XPATH_RANGE: ++ endNode = end->user2; ++ endIndex = end->index2; + break; + case XPATH_NODESET: + /* +@@ -557,39 +544,15 @@ xmlXPtrNewRangeNodeObject(xmlNodePtr start, xmlXPathObjectPtr end) { + */ + if (end->nodesetval->nodeNr <= 0) + return(NULL); ++ endNode = end->nodesetval->nodeTab[end->nodesetval->nodeNr - 1]; ++ endIndex = -1; + break; + default: + /* TODO */ + return(NULL); + } + +- ret = (xmlXPathObjectPtr) xmlMalloc(sizeof(xmlXPathObject)); +- if (ret == NULL) { +- xmlXPtrErrMemory("allocating range"); +- return(NULL); +- } +- memset(ret, 0 , (size_t) sizeof(xmlXPathObject)); +- ret->type = XPATH_RANGE; +- ret->user = start; +- ret->index = -1; +- switch (end->type) { +- case XPATH_POINT: +- ret->user2 = end->user; +- ret->index2 = end->index; +- break; +- case XPATH_RANGE: +- ret->user2 = end->user2; +- ret->index2 = end->index2; +- break; +- case XPATH_NODESET: { +- ret->user2 = end->nodesetval->nodeTab[end->nodesetval->nodeNr - 1]; +- ret->index2 = -1; +- break; +- } +- default: +- STRANGE +- return(NULL); +- } ++ ret = xmlXPtrNewRangeInternal(start, -1, endNode, endIndex); + xmlXPtrRangeCheckOrder(ret); + return(ret); + } +@@ -1835,8 +1798,8 @@ xmlXPtrStartPointFunction(xmlXPathParserContextPtr ctxt, int nargs) { + case XPATH_RANGE: { + xmlNodePtr node = tmp->user; + if (node != NULL) { +- if (node->type == XML_ATTRIBUTE_NODE) { +- /* TODO: Namespace Nodes ??? */ ++ if ((node->type == XML_ATTRIBUTE_NODE) || ++ (node->type == XML_NAMESPACE_DECL)) { + xmlXPathFreeObject(obj); + xmlXPtrFreeLocationSet(newset); + XP_ERROR(XPTR_SYNTAX_ERROR); +@@ -1931,8 +1894,8 @@ xmlXPtrEndPointFunction(xmlXPathParserContextPtr ctxt, int nargs) { + case XPATH_RANGE: { + xmlNodePtr node = tmp->user2; + if (node != NULL) { +- if (node->type == XML_ATTRIBUTE_NODE) { +- /* TODO: Namespace Nodes ??? */ ++ if ((node->type == XML_ATTRIBUTE_NODE) || ++ (node->type == XML_NAMESPACE_DECL)) { + xmlXPathFreeObject(obj); + xmlXPtrFreeLocationSet(newset); + XP_ERROR(XPTR_SYNTAX_ERROR); diff --git a/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-CVE-2016-5131.patch b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-CVE-2016-5131.patch new file mode 100644 index 000000000..9d47d023a --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-CVE-2016-5131.patch @@ -0,0 +1,180 @@ +From 9ab01a277d71f54d3143c2cf333c5c2e9aaedd9e Mon Sep 17 00:00:00 2001 +From: Nick Wellnhofer <wellnhofer@aevum.de> +Date: Tue, 28 Jun 2016 14:22:23 +0200 +Subject: [PATCH] Fix XPointer paths beginning with range-to + +The old code would invoke the broken xmlXPtrRangeToFunction. 
range-to +isn't really a function but a special kind of location step. Remove +this function and always handle range-to in the XPath code. + +The old xmlXPtrRangeToFunction could also be abused to trigger a +use-after-free error with the potential for remote code execution. + +Found with afl-fuzz. + +Fixes CVE-2016-5131. + +CVE: CVE-2016-5131 +Upstream-Status: Backport +https://git.gnome.org/browse/libxml2/commit/?id=9ab01a277d71f54d3143c2cf333c5c2e9aaedd9e + +Signed-off-by: Yi Zhao <yi.zhao@windirver.com> +--- + result/XPath/xptr/vidbase | 13 ++++++++ + test/XPath/xptr/vidbase | 1 + + xpath.c | 7 ++++- + xpointer.c | 76 ++++------------------------------------------- + 4 files changed, 26 insertions(+), 71 deletions(-) + +diff --git a/result/XPath/xptr/vidbase b/result/XPath/xptr/vidbase +index 8b9e92d..f19193e 100644 +--- a/result/XPath/xptr/vidbase ++++ b/result/XPath/xptr/vidbase +@@ -17,3 +17,16 @@ Object is a Location Set: + To node + ELEMENT p + ++ ++======================== ++Expression: xpointer(range-to(id('chapter2'))) ++Object is a Location Set: ++1 : Object is a range : ++ From node ++ / ++ To node ++ ELEMENT chapter ++ ATTRIBUTE id ++ TEXT ++ content=chapter2 ++ +diff --git a/test/XPath/xptr/vidbase b/test/XPath/xptr/vidbase +index b146383..884b106 100644 +--- a/test/XPath/xptr/vidbase ++++ b/test/XPath/xptr/vidbase +@@ -1,2 +1,3 @@ + xpointer(id('chapter1')/p) + xpointer(id('chapter1')/p[1]/range-to(following-sibling::p[2])) ++xpointer(range-to(id('chapter2'))) +diff --git a/xpath.c b/xpath.c +index d992841..5a01b1b 100644 +--- a/xpath.c ++++ b/xpath.c +@@ -10691,13 +10691,18 @@ xmlXPathCompPathExpr(xmlXPathParserContextPtr ctxt) { + lc = 1; + break; + } else if ((NXT(len) == '(')) { +- /* Note Type or Function */ ++ /* Node Type or Function */ + if (xmlXPathIsNodeType(name)) { + #ifdef DEBUG_STEP + xmlGenericError(xmlGenericErrorContext, + "PathExpr: Type search\n"); + #endif + lc = 1; ++#ifdef LIBXML_XPTR_ENABLED ++ } else if (ctxt->xptr && ++ xmlStrEqual(name, BAD_CAST "range-to")) { ++ lc = 1; ++#endif + } else { + #ifdef DEBUG_STEP + xmlGenericError(xmlGenericErrorContext, +diff --git a/xpointer.c b/xpointer.c +index 676c510..d74174a 100644 +--- a/xpointer.c ++++ b/xpointer.c +@@ -1332,8 +1332,6 @@ xmlXPtrNewContext(xmlDocPtr doc, xmlNodePtr here, xmlNodePtr origin) { + ret->here = here; + ret->origin = origin; + +- xmlXPathRegisterFunc(ret, (xmlChar *)"range-to", +- xmlXPtrRangeToFunction); + xmlXPathRegisterFunc(ret, (xmlChar *)"range", + xmlXPtrRangeFunction); + xmlXPathRegisterFunc(ret, (xmlChar *)"range-inside", +@@ -2243,76 +2241,14 @@ xmlXPtrRangeInsideFunction(xmlXPathParserContextPtr ctxt, int nargs) { + * @nargs: the number of args + * + * Implement the range-to() XPointer function ++ * ++ * Obsolete. range-to is not a real function but a special type of location ++ * step which is handled in xpath.c. + */ + void +-xmlXPtrRangeToFunction(xmlXPathParserContextPtr ctxt, int nargs) { +- xmlXPathObjectPtr range; +- const xmlChar *cur; +- xmlXPathObjectPtr res, obj; +- xmlXPathObjectPtr tmp; +- xmlLocationSetPtr newset = NULL; +- xmlNodeSetPtr oldset; +- int i; +- +- if (ctxt == NULL) return; +- CHECK_ARITY(1); +- /* +- * Save the expression pointer since we will have to evaluate +- * it multiple times. Initialize the new set. 
+- */ +- CHECK_TYPE(XPATH_NODESET); +- obj = valuePop(ctxt); +- oldset = obj->nodesetval; +- ctxt->context->node = NULL; +- +- cur = ctxt->cur; +- newset = xmlXPtrLocationSetCreate(NULL); +- +- for (i = 0; i < oldset->nodeNr; i++) { +- ctxt->cur = cur; +- +- /* +- * Run the evaluation with a node list made of a single item +- * in the nodeset. +- */ +- ctxt->context->node = oldset->nodeTab[i]; +- tmp = xmlXPathNewNodeSet(ctxt->context->node); +- valuePush(ctxt, tmp); +- +- xmlXPathEvalExpr(ctxt); +- CHECK_ERROR; +- +- /* +- * The result of the evaluation need to be tested to +- * decided whether the filter succeeded or not +- */ +- res = valuePop(ctxt); +- range = xmlXPtrNewRangeNodeObject(oldset->nodeTab[i], res); +- if (range != NULL) { +- xmlXPtrLocationSetAdd(newset, range); +- } +- +- /* +- * Cleanup +- */ +- if (res != NULL) +- xmlXPathFreeObject(res); +- if (ctxt->value == tmp) { +- res = valuePop(ctxt); +- xmlXPathFreeObject(res); +- } +- +- ctxt->context->node = NULL; +- } +- +- /* +- * The result is used as the new evaluation set. +- */ +- xmlXPathFreeObject(obj); +- ctxt->context->node = NULL; +- ctxt->context->contextSize = -1; +- ctxt->context->proximityPosition = -1; +- valuePush(ctxt, xmlXPtrWrapLocationSet(newset)); ++xmlXPtrRangeToFunction(xmlXPathParserContextPtr ctxt, ++ int nargs ATTRIBUTE_UNUSED) { ++ XP_ERROR(XPATH_EXPR_ERROR); + } + + /** +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-fix_NULL_pointer_derefs.patch b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-fix_NULL_pointer_derefs.patch new file mode 100644 index 000000000..83552ca3e --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-fix_NULL_pointer_derefs.patch @@ -0,0 +1,46 @@ +libxml2-2.9.4: Fix more NULL pointer derefs + +xpointer: Fix more NULL pointer derefs + +Upstream-Status: Backported [https://git.gnome.org/browse/libxml2/commit/?id=e905f08123e4a6e7731549e6f09dadff4cab65bd] +CVE: - +Signed-off-by: Andrej Valek <andrej.valek@siemens.com> +Signed-off-by: Pascal Bach <pascal.bach@siemens.com> + +diff --git a/xpointer.c b/xpointer.c +index 676c510..074db24 100644 +--- a/xpointer.c ++++ b/xpointer.c +@@ -555,7 +555,7 @@ xmlXPtrNewRangeNodeObject(xmlNodePtr start, xmlXPathObjectPtr end) { + /* + * Empty set ... 
+ */ +- if (end->nodesetval->nodeNr <= 0) ++ if ((end->nodesetval == NULL) || (end->nodesetval->nodeNr <= 0)) + return(NULL); + break; + default: +@@ -1400,7 +1400,7 @@ xmlXPtrEval(const xmlChar *str, xmlXPathContextPtr ctx) { + */ + xmlNodeSetPtr set; + set = tmp->nodesetval; +- if ((set->nodeNr != 1) || ++ if ((set == NULL) || (set->nodeNr != 1) || + (set->nodeTab[0] != (xmlNodePtr) ctx->doc)) + stack++; + } else +@@ -2073,9 +2073,11 @@ xmlXPtrRangeFunction(xmlXPathParserContextPtr ctxt, int nargs) { + xmlXPathFreeObject(set); + XP_ERROR(XPATH_MEMORY_ERROR); + } +- for (i = 0;i < oldset->locNr;i++) { +- xmlXPtrLocationSetAdd(newset, +- xmlXPtrCoveringRange(ctxt, oldset->locTab[i])); ++ if (oldset != NULL) { ++ for (i = 0;i < oldset->locNr;i++) { ++ xmlXPtrLocationSetAdd(newset, ++ xmlXPtrCoveringRange(ctxt, oldset->locTab[i])); ++ } + } + + /* diff --git a/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-fix_node_comparison.patch b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-fix_node_comparison.patch new file mode 100644 index 000000000..11718bb2b --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2/libxml2-fix_node_comparison.patch @@ -0,0 +1,67 @@ +libxml2-2.9.4: Fix comparison with root node in xmlXPathCmpNodes and NULL pointer deref in XPointer + +xpath: + - Check for errors after evaluating first operand. + - Add sanity check for empty stack. + - Include comparation in changes from xmlXPathCmpNodesExt to xmlXPathCmpNodes + +Upstream-Status: Backported + - [https://git.gnome.org/browse/libxml2/commit/?id=c1d1f7121194036608bf555f08d3062a36fd344b] + - [https://git.gnome.org/browse/libxml2/commit/?id=a005199330b86dada19d162cae15ef9bdcb6baa8] +CVE: necessary changes for fixing CVE-2016-5131 +Signed-off-by: Andrej Valek <andrej.valek@siemens.com> +Signed-off-by: Pascal Bach <pascal.bach@siemens.com> + +diff --git a/result/XPath/xptr/viderror b/result/XPath/xptr/viderror +new file mode 100644 +index 0000000..d589882 +--- /dev/null ++++ b/result/XPath/xptr/viderror +@@ -0,0 +1,4 @@ ++ ++======================== ++Expression: xpointer(non-existing-fn()/range-to(id('chapter2'))) ++Object is empty (NULL) +diff --git a/test/XPath/xptr/viderror b/test/XPath/xptr/viderror +new file mode 100644 +index 0000000..da8c53b +--- /dev/null ++++ b/test/XPath/xptr/viderror +@@ -0,0 +1 @@ ++xpointer(non-existing-fn()/range-to(id('chapter2'))) +diff --git a/xpath.c b/xpath.c +index 113bce6..d992841 100644 +--- a/xpath.c ++++ b/xpath.c +@@ -3342,13 +3342,13 @@ xmlXPathCmpNodes(xmlNodePtr node1, xmlNodePtr node2) { + * compute depth to root + */ + for (depth2 = 0, cur = node2;cur->parent != NULL;cur = cur->parent) { +- if (cur == node1) ++ if (cur->parent == node1) + return(1); + depth2++; + } + root = cur; + for (depth1 = 0, cur = node1;cur->parent != NULL;cur = cur->parent) { +- if (cur == node2) ++ if (cur->parent == node2) + return(-1); + depth1++; + } +@@ -14005,9 +14005,14 @@ xmlXPathCompOpEval(xmlXPathParserContextPtr ctxt, xmlXPathStepOpPtr op) + xmlNodeSetPtr oldset; + int i, j; + +- if (op->ch1 != -1) ++ if (op->ch1 != -1) { + total += + xmlXPathCompOpEval(ctxt, &comp->steps[op->ch1]); ++ CHECK_ERROR0; ++ } ++ if (ctxt->value == NULL) { ++ XP_ERROR0(XPATH_INVALID_OPERAND); ++ } + if (op->ch2 == -1) + return (total); + diff --git a/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2_2.9.4.bb b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2_2.9.4.bb index 59874bec2..c2c3c9326 100644 --- 
a/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2_2.9.4.bb +++ b/import-layers/yocto-poky/meta/recipes-core/libxml/libxml2_2.9.4.bb @@ -19,6 +19,11 @@ SRC_URI = "ftp://xmlsoft.org/libxml2/libxml2-${PV}.tar.gz;name=libtar \ file://run-ptest \ file://python-sitepackages-dir.patch \ file://libxml-m4-use-pkgconfig.patch \ + file://libxml2-fix_node_comparison.patch \ + file://libxml2-CVE-2016-5131.patch \ + file://libxml2-CVE-2016-4658.patch \ + file://libxml2-fix_NULL_pointer_derefs.patch \ + file://CVE-2016-9318.patch \ " SRC_URI[libtar.md5sum] = "ae249165c173b1ff386ee8ad676815f5" diff --git a/import-layers/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb b/import-layers/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb index 274e11a8b..43fc599c7 100644 --- a/import-layers/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb +++ b/import-layers/yocto-poky/meta/recipes-core/packagegroups/packagegroup-core-standalone-sdk-target.bb @@ -12,3 +12,8 @@ RDEPENDS_${PN} = "\ libstdc++-dev \ ${LIBC_DEPENDENCIES} \ " + +RRECOMMENDS_${PN} = "\ + libssp \ + libssp-dev \ + " diff --git a/import-layers/yocto-poky/meta/recipes-core/systemd/systemd/validate-user.patch b/import-layers/yocto-poky/meta/recipes-core/systemd/systemd/validate-user.patch new file mode 100644 index 000000000..8e0e0c1b9 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-core/systemd/systemd/validate-user.patch @@ -0,0 +1,856 @@ +If a user is created with a strictly-speaking invalid name such as '0day' and a +unit created to run as that user, systemd rejects the username and runs the unit +as root. + +CVE: CVE-2017-1000082 +Upstream-Status: Backport +Signed-off-by: Ross Burton <ross.burton@intel.com> + +From e0c4eb1435d50cb3797cf94100d4886dc2022bce Mon Sep 17 00:00:00 2001 +From: Lennart Poettering <lennart@poettering.net> +Date: Thu, 14 Jul 2016 12:23:39 +0200 +Subject: [PATCH 1/3] sysusers: move various user credential validity checks to + src/basic/ + +This way we can reuse them for validating User=/Group= settings in unit files +(to be added in a later commit). + +Also, add some tests for them. +--- + src/basic/user-util.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++ + src/basic/user-util.h | 5 +++ + src/sysusers/sysusers.c | 75 -------------------------------------- + src/test/test-user-util.c | 87 ++++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 185 insertions(+), 75 deletions(-) + +diff --git a/src/basic/user-util.c b/src/basic/user-util.c +index f65ca3eda..c85b5c6a8 100644 +--- a/src/basic/user-util.c ++++ b/src/basic/user-util.c +@@ -29,6 +29,7 @@ + #include <string.h> + #include <sys/stat.h> + #include <unistd.h> ++#include <utmp.h> + + #include "missing.h" + #include "alloc-util.h" +@@ -39,6 +40,7 @@ + #include "path-util.h" + #include "string-util.h" + #include "user-util.h" ++#include "utf8.h" + + bool uid_is_valid(uid_t uid) { + +@@ -479,3 +481,94 @@ int take_etc_passwd_lock(const char *root) { + + return fd; + } ++ ++bool valid_user_group_name(const char *u) { ++ const char *i; ++ long sz; ++ ++ /* Checks if the specified name is a valid user/group name. 
*/ ++ ++ if (isempty(u)) ++ return false; ++ ++ if (!(u[0] >= 'a' && u[0] <= 'z') && ++ !(u[0] >= 'A' && u[0] <= 'Z') && ++ u[0] != '_') ++ return false; ++ ++ for (i = u+1; *i; i++) { ++ if (!(*i >= 'a' && *i <= 'z') && ++ !(*i >= 'A' && *i <= 'Z') && ++ !(*i >= '0' && *i <= '9') && ++ *i != '_' && ++ *i != '-') ++ return false; ++ } ++ ++ sz = sysconf(_SC_LOGIN_NAME_MAX); ++ assert_se(sz > 0); ++ ++ if ((size_t) (i-u) > (size_t) sz) ++ return false; ++ ++ if ((size_t) (i-u) > UT_NAMESIZE - 1) ++ return false; ++ ++ return true; ++} ++ ++bool valid_user_group_name_or_id(const char *u) { ++ ++ /* Similar as above, but is also fine with numeric UID/GID specifications, as long as they are in the right ++ * range, and not the invalid user ids. */ ++ ++ if (isempty(u)) ++ return false; ++ ++ if (valid_user_group_name(u)) ++ return true; ++ ++ return parse_uid(u, NULL) >= 0; ++} ++ ++bool valid_gecos(const char *d) { ++ ++ if (!d) ++ return false; ++ ++ if (!utf8_is_valid(d)) ++ return false; ++ ++ if (string_has_cc(d, NULL)) ++ return false; ++ ++ /* Colons are used as field separators, and hence not OK */ ++ if (strchr(d, ':')) ++ return false; ++ ++ return true; ++} ++ ++bool valid_home(const char *p) { ++ ++ if (isempty(p)) ++ return false; ++ ++ if (!utf8_is_valid(p)) ++ return false; ++ ++ if (string_has_cc(p, NULL)) ++ return false; ++ ++ if (!path_is_absolute(p)) ++ return false; ++ ++ if (!path_is_safe(p)) ++ return false; ++ ++ /* Colons are used as field separators, and hence not OK */ ++ if (strchr(p, ':')) ++ return false; ++ ++ return true; ++} +diff --git a/src/basic/user-util.h b/src/basic/user-util.h +index 8026eca3f..36f71fb00 100644 +--- a/src/basic/user-util.h ++++ b/src/basic/user-util.h +@@ -68,3 +68,8 @@ int take_etc_passwd_lock(const char *root); + static inline bool userns_supported(void) { + return access("/proc/self/uid_map", F_OK) >= 0; + } ++ ++bool valid_user_group_name(const char *u); ++bool valid_user_group_name_or_id(const char *u); ++bool valid_gecos(const char *d); ++bool valid_home(const char *p); +diff --git a/src/sysusers/sysusers.c b/src/sysusers/sysusers.c +index 4377f1b91..df3b7de30 100644 +--- a/src/sysusers/sysusers.c ++++ b/src/sysusers/sysusers.c +@@ -1299,81 +1299,6 @@ static bool item_equal(Item *a, Item *b) { + return true; + } + +-static bool valid_user_group_name(const char *u) { +- const char *i; +- long sz; +- +- if (isempty(u)) +- return false; +- +- if (!(u[0] >= 'a' && u[0] <= 'z') && +- !(u[0] >= 'A' && u[0] <= 'Z') && +- u[0] != '_') +- return false; +- +- for (i = u+1; *i; i++) { +- if (!(*i >= 'a' && *i <= 'z') && +- !(*i >= 'A' && *i <= 'Z') && +- !(*i >= '0' && *i <= '9') && +- *i != '_' && +- *i != '-') +- return false; +- } +- +- sz = sysconf(_SC_LOGIN_NAME_MAX); +- assert_se(sz > 0); +- +- if ((size_t) (i-u) > (size_t) sz) +- return false; +- +- if ((size_t) (i-u) > UT_NAMESIZE - 1) +- return false; +- +- return true; +-} +- +-static bool valid_gecos(const char *d) { +- +- if (!d) +- return false; +- +- if (!utf8_is_valid(d)) +- return false; +- +- if (string_has_cc(d, NULL)) +- return false; +- +- /* Colons are used as field separators, and hence not OK */ +- if (strchr(d, ':')) +- return false; +- +- return true; +-} +- +-static bool valid_home(const char *p) { +- +- if (isempty(p)) +- return false; +- +- if (!utf8_is_valid(p)) +- return false; +- +- if (string_has_cc(p, NULL)) +- return false; +- +- if (!path_is_absolute(p)) +- return false; +- +- if (!path_is_safe(p)) +- return false; +- +- /* Colons are used as field 
separators, and hence not OK */ +- if (strchr(p, ':')) +- return false; +- +- return true; +-} +- + static int parse_line(const char *fname, unsigned line, const char *buffer) { + + static const Specifier specifier_table[] = { +diff --git a/src/test/test-user-util.c b/src/test/test-user-util.c +index 8d1ec19f1..2a344a9f9 100644 +--- a/src/test/test-user-util.c ++++ b/src/test/test-user-util.c +@@ -61,6 +61,88 @@ static void test_uid_ptr(void) { + assert_se(PTR_TO_UID(UID_TO_PTR(1000)) == 1000); + } + ++static void test_valid_user_group_name(void) { ++ assert_se(!valid_user_group_name(NULL)); ++ assert_se(!valid_user_group_name("")); ++ assert_se(!valid_user_group_name("1")); ++ assert_se(!valid_user_group_name("65535")); ++ assert_se(!valid_user_group_name("-1")); ++ assert_se(!valid_user_group_name("-kkk")); ++ assert_se(!valid_user_group_name("rööt")); ++ assert_se(!valid_user_group_name(".")); ++ assert_se(!valid_user_group_name("eff.eff")); ++ assert_se(!valid_user_group_name("foo\nbar")); ++ assert_se(!valid_user_group_name("0123456789012345678901234567890123456789")); ++ assert_se(!valid_user_group_name_or_id("aaa:bbb")); ++ ++ assert_se(valid_user_group_name("root")); ++ assert_se(valid_user_group_name("lennart")); ++ assert_se(valid_user_group_name("LENNART")); ++ assert_se(valid_user_group_name("_kkk")); ++ assert_se(valid_user_group_name("kkk-")); ++ assert_se(valid_user_group_name("kk-k")); ++ ++ assert_se(valid_user_group_name("some5")); ++ assert_se(!valid_user_group_name("5some")); ++ assert_se(valid_user_group_name("INNER5NUMBER")); ++} ++ ++static void test_valid_user_group_name_or_id(void) { ++ assert_se(!valid_user_group_name_or_id(NULL)); ++ assert_se(!valid_user_group_name_or_id("")); ++ assert_se(valid_user_group_name_or_id("0")); ++ assert_se(valid_user_group_name_or_id("1")); ++ assert_se(valid_user_group_name_or_id("65534")); ++ assert_se(!valid_user_group_name_or_id("65535")); ++ assert_se(valid_user_group_name_or_id("65536")); ++ assert_se(!valid_user_group_name_or_id("-1")); ++ assert_se(!valid_user_group_name_or_id("-kkk")); ++ assert_se(!valid_user_group_name_or_id("rööt")); ++ assert_se(!valid_user_group_name_or_id(".")); ++ assert_se(!valid_user_group_name_or_id("eff.eff")); ++ assert_se(!valid_user_group_name_or_id("foo\nbar")); ++ assert_se(!valid_user_group_name_or_id("0123456789012345678901234567890123456789")); ++ assert_se(!valid_user_group_name_or_id("aaa:bbb")); ++ ++ assert_se(valid_user_group_name_or_id("root")); ++ assert_se(valid_user_group_name_or_id("lennart")); ++ assert_se(valid_user_group_name_or_id("LENNART")); ++ assert_se(valid_user_group_name_or_id("_kkk")); ++ assert_se(valid_user_group_name_or_id("kkk-")); ++ assert_se(valid_user_group_name_or_id("kk-k")); ++ ++ assert_se(valid_user_group_name_or_id("some5")); ++ assert_se(!valid_user_group_name_or_id("5some")); ++ assert_se(valid_user_group_name_or_id("INNER5NUMBER")); ++} ++ ++static void test_valid_gecos(void) { ++ ++ assert_se(!valid_gecos(NULL)); ++ assert_se(valid_gecos("")); ++ assert_se(valid_gecos("test")); ++ assert_se(valid_gecos("Ümläüt")); ++ assert_se(!valid_gecos("In\nvalid")); ++ assert_se(!valid_gecos("In:valid")); ++} ++ ++static void test_valid_home(void) { ++ ++ assert_se(!valid_home(NULL)); ++ assert_se(!valid_home("")); ++ assert_se(!valid_home(".")); ++ assert_se(!valid_home("/home/..")); ++ assert_se(!valid_home("/home/../")); ++ assert_se(!valid_home("/home\n/foo")); ++ assert_se(!valid_home("./piep")); ++ assert_se(!valid_home("piep")); ++
assert_se(!valid_home("/home/user:lennart")); ++ ++ assert_se(valid_home("/")); ++ assert_se(valid_home("/home")); ++ assert_se(valid_home("/home/foo")); ++} ++ + int main(int argc, char*argv[]) { + + test_uid_to_name_one(0, "root"); +@@ -75,5 +157,10 @@ int main(int argc, char*argv[]) { + test_parse_uid(); + test_uid_ptr(); + ++ test_valid_user_group_name(); ++ test_valid_user_group_name_or_id(); ++ test_valid_gecos(); ++ test_valid_home(); ++ + return 0; + } +-- +2.11.0 + + +From 1affacaaf6eff93e53563a644567cc5c3930cb28 Mon Sep 17 00:00:00 2001 +From: Lennart Poettering <lennart@poettering.net> +Date: Thu, 14 Jul 2016 12:28:06 +0200 +Subject: [PATCH 2/3] core: be stricter when parsing User=/Group= fields + +Let's verify the validity of the syntax of the user/group names set. +--- + src/core/load-fragment-gperf.gperf.m4 | 10 +-- + src/core/load-fragment.c | 118 ++++++++++++++++++++++++++++++++++ + src/core/load-fragment.h | 2 + + 3 files changed, 125 insertions(+), 5 deletions(-) + +diff --git a/src/core/load-fragment-gperf.gperf.m4 b/src/core/load-fragment-gperf.gperf.m4 +index 819341898..110089696 100644 +--- a/src/core/load-fragment-gperf.gperf.m4 ++++ b/src/core/load-fragment-gperf.gperf.m4 +@@ -19,9 +19,9 @@ m4_dnl Define the context options only once + m4_define(`EXEC_CONTEXT_CONFIG_ITEMS', + `$1.WorkingDirectory, config_parse_working_directory, 0, offsetof($1, exec_context) + $1.RootDirectory, config_parse_unit_path_printf, 0, offsetof($1, exec_context.root_directory) +-$1.User, config_parse_unit_string_printf, 0, offsetof($1, exec_context.user) +-$1.Group, config_parse_unit_string_printf, 0, offsetof($1, exec_context.group) +-$1.SupplementaryGroups, config_parse_strv, 0, offsetof($1, exec_context.supplementary_groups) ++$1.User, config_parse_user_group, 0, offsetof($1, exec_context.user) ++$1.Group, config_parse_user_group, 0, offsetof($1, exec_context.group) ++$1.SupplementaryGroups, config_parse_user_group_strv, 0, offsetof($1, exec_context.supplementary_groups) + $1.Nice, config_parse_exec_nice, 0, offsetof($1, exec_context) + $1.OOMScoreAdjust, config_parse_exec_oom_score_adjust, 0, offsetof($1, exec_context) + $1.IOSchedulingClass, config_parse_exec_io_class, 0, offsetof($1, exec_context) +@@ -275,8 +275,8 @@ Socket.ExecStartPost, config_parse_exec, SOCKET_EXEC + Socket.ExecStopPre, config_parse_exec, SOCKET_EXEC_STOP_PRE, offsetof(Socket, exec_command) + Socket.ExecStopPost, config_parse_exec, SOCKET_EXEC_STOP_POST, offsetof(Socket, exec_command) + Socket.TimeoutSec, config_parse_sec, 0, offsetof(Socket, timeout_usec) +-Socket.SocketUser, config_parse_unit_string_printf, 0, offsetof(Socket, user) +-Socket.SocketGroup, config_parse_unit_string_printf, 0, offsetof(Socket, group) ++Socket.SocketUser, config_parse_user_group, 0, offsetof(Socket, user) ++Socket.SocketGroup, config_parse_user_group, 0, offsetof(Socket, group) + Socket.SocketMode, config_parse_mode, 0, offsetof(Socket, socket_mode) + Socket.DirectoryMode, config_parse_mode, 0, offsetof(Socket, directory_mode) + Socket.Accept, config_parse_bool, 0, offsetof(Socket, accept) +diff --git a/src/core/load-fragment.c b/src/core/load-fragment.c +index 86b4fb071..f43781803 100644 +--- a/src/core/load-fragment.c ++++ b/src/core/load-fragment.c +@@ -64,6 +64,7 @@ + #include "unit-name.h" + #include "unit-printf.h" + #include "unit.h" ++#include "user-util.h" + #include "utf8.h" + #include "web-util.h" + +@@ -1758,6 +1759,123 @@ int config_parse_sec_fix_0( + return 0; + } + ++int config_parse_user_group( ++ const char *unit, 
++ const char *filename, ++ unsigned line, ++ const char *section, ++ unsigned section_line, ++ const char *lvalue, ++ int ltype, ++ const char *rvalue, ++ void *data, ++ void *userdata) { ++ ++ char **user = data, *n; ++ Unit *u = userdata; ++ int r; ++ ++ assert(filename); ++ assert(lvalue); ++ assert(rvalue); ++ assert(u); ++ ++ if (isempty(rvalue)) ++ n = NULL; ++ else { ++ _cleanup_free_ char *k = NULL; ++ ++ r = unit_full_printf(u, rvalue, &k); ++ if (r < 0) { ++ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve unit specifiers in %s, ignoring: %m", rvalue); ++ return 0; ++ } ++ ++ if (!valid_user_group_name_or_id(k)) { ++ log_syntax(unit, LOG_ERR, filename, line, 0, "Invalid user/group name or numeric ID, ignoring: %s", k); ++ return 0; ++ } ++ ++ n = k; ++ k = NULL; ++ } ++ ++ free(*user); ++ *user = n; ++ ++ return 0; ++} ++ ++int config_parse_user_group_strv( ++ const char *unit, ++ const char *filename, ++ unsigned line, ++ const char *section, ++ unsigned section_line, ++ const char *lvalue, ++ int ltype, ++ const char *rvalue, ++ void *data, ++ void *userdata) { ++ ++ char ***users = data; ++ Unit *u = userdata; ++ const char *p; ++ int r; ++ ++ assert(filename); ++ assert(lvalue); ++ assert(rvalue); ++ assert(u); ++ ++ if (isempty(rvalue)) { ++ char **empty; ++ ++ empty = new0(char*, 1); ++ if (!empty) ++ return log_oom(); ++ ++ strv_free(*users); ++ *users = empty; ++ ++ return 0; ++ } ++ ++ p = rvalue; ++ for (;;) { ++ _cleanup_free_ char *word = NULL, *k = NULL; ++ ++ r = extract_first_word(&p, &word, WHITESPACE, 0); ++ if (r == 0) ++ break; ++ if (r == -ENOMEM) ++ return log_oom(); ++ if (r < 0) { ++ log_syntax(unit, LOG_ERR, filename, line, r, "Invalid syntax, ignoring: %s", rvalue); ++ break; ++ } ++ ++ r = unit_full_printf(u, word, &k); ++ if (r < 0) { ++ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve unit specifiers in %s, ignoring: %m", word); ++ continue; ++ } ++ ++ if (!valid_user_group_name_or_id(k)) { ++ log_syntax(unit, LOG_ERR, filename, line, 0, "Invalid user/group name or numeric ID, ignoring: %s", k); ++ continue; ++ } ++ ++ r = strv_push(users, k); ++ if (r < 0) ++ return log_oom(); ++ ++ k = NULL; ++ } ++ ++ return 0; ++} ++ + int config_parse_busname_service( + const char *unit, + const char *filename, +diff --git a/src/core/load-fragment.h b/src/core/load-fragment.h +index b36a2e3a0..213bce55a 100644 +--- a/src/core/load-fragment.h ++++ b/src/core/load-fragment.h +@@ -111,6 +111,8 @@ int config_parse_exec_utmp_mode(const char *unit, const char *filename, unsigned + int config_parse_working_directory(const char *unit, const char *filename, unsigned line, const char *section, unsigned section_line, const char *lvalue, int ltype, const char *rvalue, void *data, void *userdata); + int config_parse_fdname(const char *unit, const char *filename, unsigned line, const char *section, unsigned section_line, const char *lvalue, int ltype, const char *rvalue, void *data, void *userdata); + int config_parse_sec_fix_0(const char *unit, const char *filename, unsigned line, const char *section, unsigned section_line, const char *lvalue, int ltype, const char *rvalue, void *data, void *userdata); ++int config_parse_user_group(const char *unit, const char *filename, unsigned line, const char *section, unsigned section_line, const char *lvalue, int ltype, const char *rvalue, void *data, void *userdata); ++int config_parse_user_group_strv(const char *unit, const char *filename, unsigned line, const char *section, unsigned section_line, const 
char *lvalue, int ltype, const char *rvalue, void *data, void *userdata); + + /* gperf prototypes */ + const struct ConfigPerfItem* load_fragment_gperf_lookup(const char *key, unsigned length); +-- +2.11.0 + + +From 97e0456384ed5c930394062d340237ea6130ece0 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= <zbyszek@in.waw.pl> +Date: Thu, 6 Jul 2017 13:28:19 -0400 +Subject: [PATCH 3/3] core/load-fragment: refuse units with errors in certain + directives + +If an error is encountered in any of the Exec* lines, WorkingDirectory, +SELinuxContext, ApparmorProfile, SmackProcessLabel, Service (in .socket +units), User, or Group, refuse to load the unit. If the config stanza +has support, ignore the failure if '-' is present. + +For those configuration directives, even if we started the unit, it's +pretty likely that it'll do something unexpected (like write files +in a wrong place, or with a wrong context, or run with wrong permissions, +etc). It seems better to refuse to start the unit and have the admin +clean up the configuration without giving the service a chance to mess +up stuff. + +Note that all "security" options that restrict what the unit can do +(Capabilities, AmbientCapabilities, Restrict*, SystemCallFilter, Limit*, +PrivateDevices, Protect*, etc) are _not_ treated like this. Such options are +only supplementary, and are not always available depending on the architecture +and compilation options, so unit authors have to make sure that the service +runs correctly without them anyway. + +Fixes #6237, #6277. + +Signed-off-by: Ross Burton <ross.burton@intel.com> +--- + src/core/load-fragment.c | 101 ++++++++++++++++++++++++++++------------------ + src/test/test-unit-file.c | 14 +++---- + 2 files changed, 69 insertions(+), 46 deletions(-) + +diff --git a/src/core/load-fragment.c b/src/core/load-fragment.c +index f43781803..b1fb1d407 100644 +--- a/src/core/load-fragment.c ++++ b/src/core/load-fragment.c +@@ -626,20 +626,28 @@ int config_parse_exec( + + if (isempty(f)) { + /* First word is either "-" or "@" with no command. */ +- log_syntax(unit, LOG_ERR, filename, line, 0, "Empty path in command line, ignoring: \"%s\"", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, ++ "Empty path in command line%s: \"%s\"", ++ ignore ? ", ignoring" : "", rvalue); ++ return ignore ? 0 : -ENOEXEC; + } + if (!string_is_safe(f)) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Executable path contains special characters, ignoring: %s", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, ++ "Executable path contains special characters%s: %s", ++ ignore ? ", ignoring" : "", rvalue); ++ return ignore ? 0 : -ENOEXEC; + } + if (!path_is_absolute(f)) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Executable path is not absolute, ignoring: %s", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, ++ "Executable path is not absolute%s: %s", ++ ignore ? ", ignoring" : "", rvalue); ++ return ignore ? 0 : -ENOEXEC; + } + if (endswith(f, "/")) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Executable path specifies a directory, ignoring: %s", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, ++ "Executable path specifies a directory%s: %s", ++ ignore ? ", ignoring" : "", rvalue); ++ return ignore ? 0 : -ENOEXEC; + } + + if (f == firstword) { +@@ -695,7 +703,7 @@ int config_parse_exec( + if (r == 0) + break; + else if (r < 0) +- return 0; ++ return ignore ? 
0 : -ENOEXEC; + + if (!GREEDY_REALLOC(n, nbufsize, nlen + 2)) + return log_oom(); +@@ -705,8 +713,10 @@ int config_parse_exec( + } + + if (!n || !n[0]) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Empty executable name or zeroeth argument, ignoring: %s", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, ++ "Empty executable name or zeroeth argument%s: %s", ++ ignore ? ", ignoring" : "", rvalue); ++ return ignore ? 0 : -ENOEXEC; + } + + nce = new0(ExecCommand, 1); +@@ -1214,8 +1224,10 @@ int config_parse_exec_selinux_context( + + r = unit_name_printf(u, rvalue, &k); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve specifiers, ignoring: %m"); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, r, ++ "Failed to resolve specifiers%s: %m", ++ ignore ? ", ignoring" : ""); ++ return ignore ? 0 : -ENOEXEC; + } + + free(c->selinux_context); +@@ -1262,8 +1274,10 @@ int config_parse_exec_apparmor_profile( + + r = unit_name_printf(u, rvalue, &k); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve specifiers, ignoring: %m"); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, r, ++ "Failed to resolve specifiers%s: %m", ++ ignore ? ", ignoring" : ""); ++ return ignore ? 0 : -ENOEXEC; + } + + free(c->apparmor_profile); +@@ -1310,8 +1324,10 @@ int config_parse_exec_smack_process_label( + + r = unit_name_printf(u, rvalue, &k); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve specifiers, ignoring: %m"); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, r, ++ "Failed to resolve specifiers%s: %m", ++ ignore ? ", ignoring" : ""); ++ return ignore ? 0 : -ENOEXEC; + } + + free(c->smack_process_label); +@@ -1520,19 +1536,19 @@ int config_parse_socket_service( + + r = unit_name_printf(UNIT(s), rvalue, &p); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve specifiers, ignoring: %s", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve specifiers: %s", rvalue); ++ return -ENOEXEC; + } + + if (!endswith(p, ".service")) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Unit must be of type service, ignoring: %s", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, "Unit must be of type service: %s", rvalue); ++ return -ENOEXEC; + } + + r = manager_load_unit(UNIT(s)->manager, p, NULL, &error, &x); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to load unit %s, ignoring: %s", rvalue, bus_error_message(&error, r)); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to load unit %s: %s", rvalue, bus_error_message(&error, r)); ++ return -ENOEXEC; + } + + unit_ref_set(&s->service, x); +@@ -1787,13 +1803,13 @@ int config_parse_user_group( + + r = unit_full_printf(u, rvalue, &k); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve unit specifiers in %s, ignoring: %m", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve unit specifiers in %s: %m", rvalue); ++ return -ENOEXEC; + } + + if (!valid_user_group_name_or_id(k)) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Invalid user/group name or numeric ID, ignoring: %s", k); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, "Invalid user/group name or numeric ID: %s", k); ++ return -ENOEXEC; + } + + n = k; +@@ -1851,19 +1867,19 @@ int config_parse_user_group_strv( + if (r == -ENOMEM) + return log_oom(); + if (r < 0) { +- 
log_syntax(unit, LOG_ERR, filename, line, r, "Invalid syntax, ignoring: %s", rvalue); +- break; ++ log_syntax(unit, LOG_ERR, filename, line, r, "Invalid syntax: %s", rvalue); ++ return -ENOEXEC; + } + + r = unit_full_printf(u, word, &k); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve unit specifiers in %s, ignoring: %m", word); +- continue; ++ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve unit specifiers in %s: %m", word); ++ return -ENOEXEC; + } + + if (!valid_user_group_name_or_id(k)) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Invalid user/group name or numeric ID, ignoring: %s", k); +- continue; ++ log_syntax(unit, LOG_ERR, filename, line, 0, "Invalid user/group name or numeric ID: %s", k); ++ return -ENOEXEC; + } + + r = strv_push(users, k); +@@ -2022,20 +2038,24 @@ int config_parse_working_directory( + + r = unit_full_printf(u, rvalue, &k); + if (r < 0) { +- log_syntax(unit, LOG_ERR, filename, line, r, "Failed to resolve unit specifiers in working directory path '%s', ignoring: %m", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, r, ++ "Failed to resolve unit specifiers in working directory path '%s'%s: %m", ++ rvalue, missing_ok ? ", ignoring" : ""); ++ return missing_ok ? 0 : -ENOEXEC; + } + + path_kill_slashes(k); + + if (!utf8_is_valid(k)) { + log_syntax_invalid_utf8(unit, LOG_ERR, filename, line, rvalue); +- return 0; ++ return missing_ok ? 0 : -ENOEXEC; + } + + if (!path_is_absolute(k)) { +- log_syntax(unit, LOG_ERR, filename, line, 0, "Working directory path '%s' is not absolute, ignoring.", rvalue); +- return 0; ++ log_syntax(unit, LOG_ERR, filename, line, 0, ++ "Working directory path '%s' is not absolute%s.", ++ rvalue, missing_ok ? ", ignoring" : ""); ++ return missing_ok ? 
0 : -ENOEXEC; + } + + free(c->working_directory); +@@ -4043,8 +4063,11 @@ int unit_load_fragment(Unit *u) { + return r; + + r = load_from_path(u, k); +- if (r < 0) ++ if (r < 0) { ++ if (r == -ENOEXEC) ++ log_unit_notice(u, "Unit configuration has fatal error, unit will not be started."); + return r; ++ } + + if (u->load_state == UNIT_STUB) { + SET_FOREACH(t, u->names, i) { +diff --git a/src/test/test-unit-file.c b/src/test/test-unit-file.c +index ade0ff2a6..fe1969570 100644 +--- a/src/test/test-unit-file.c ++++ b/src/test/test-unit-file.c +@@ -146,7 +146,7 @@ static void test_config_parse_exec(void) { + r = config_parse_exec(NULL, "fake", 4, "section", 1, + "LValue", 0, "/RValue/ argv0 r1", + &c, u); +- assert_se(r == 0); ++ assert_se(r == -ENOEXEC); + assert_se(c1->command_next == NULL); + + log_info("/* honour_argv0 */"); +@@ -161,7 +161,7 @@ static void test_config_parse_exec(void) { + r = config_parse_exec(NULL, "fake", 3, "section", 1, + "LValue", 0, "@/RValue", + &c, u); +- assert_se(r == 0); ++ assert_se(r == -ENOEXEC); + assert_se(c1->command_next == NULL); + + log_info("/* no command, whitespace only, reset */"); +@@ -220,7 +220,7 @@ static void test_config_parse_exec(void) { + "-@/RValue argv0 r1 ; ; " + "/goo/goo boo", + &c, u); +- assert_se(r >= 0); ++ assert_se(r == -ENOEXEC); + c1 = c1->command_next; + check_execcommand(c1, "/RValue", "argv0", "r1", NULL, true); + +@@ -374,7 +374,7 @@ static void test_config_parse_exec(void) { + r = config_parse_exec(NULL, "fake", 4, "section", 1, + "LValue", 0, path, + &c, u); +- assert_se(r == 0); ++ assert_se(r == -ENOEXEC); + assert_se(c1->command_next == NULL); + } + +@@ -401,21 +401,21 @@ static void test_config_parse_exec(void) { + r = config_parse_exec(NULL, "fake", 4, "section", 1, + "LValue", 0, "/path\\", + &c, u); +- assert_se(r == 0); ++ assert_se(r == -ENOEXEC); + assert_se(c1->command_next == NULL); + + log_info("/* missing ending ' */"); + r = config_parse_exec(NULL, "fake", 4, "section", 1, + "LValue", 0, "/path 'foo", + &c, u); +- assert_se(r == 0); ++ assert_se(r == -ENOEXEC); + assert_se(c1->command_next == NULL); + + log_info("/* missing ending ' with trailing backslash */"); + r = config_parse_exec(NULL, "fake", 4, "section", 1, + "LValue", 0, "/path 'foo\\", + &c, u); +- assert_se(r == 0); ++ assert_se(r == -ENOEXEC); + assert_se(c1->command_next == NULL); + + log_info("/* invalid space between modifiers */"); +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-core/systemd/systemd_232.bb b/import-layers/yocto-poky/meta/recipes-core/systemd/systemd_232.bb index 19a6d309b..df1d9e137 100644 --- a/import-layers/yocto-poky/meta/recipes-core/systemd/systemd_232.bb +++ b/import-layers/yocto-poky/meta/recipes-core/systemd/systemd_232.bb @@ -24,7 +24,7 @@ SRC_URI += " \ file://0009-util-bypass-unimplemented-_SC_PHYS_PAGES-system-conf.patch \ file://0010-implment-systemd-sysv-install-for-OE.patch \ file://0011-nss-mymachines-Build-conditionally-when-HAVE_MYHOSTN.patch \ - file://0012-rules-whitelist-hd-devices.patch \ + file://0012-rules-whitelist-hd-devices.patch \ file://0013-Make-root-s-home-directory-configurable.patch \ file://0014-Revert-rules-remove-firmware-loading-rules.patch \ file://0015-Revert-udev-remove-userspace-firmware-loading-suppor.patch \ diff --git a/import-layers/yocto-poky/meta/recipes-core/udev/eudev_3.2.bb b/import-layers/yocto-poky/meta/recipes-core/udev/eudev_3.2.bb index 211252ceb..857d20db7 100644 --- a/import-layers/yocto-poky/meta/recipes-core/udev/eudev_3.2.bb +++ 
b/import-layers/yocto-poky/meta/recipes-core/udev/eudev_3.2.bb @@ -1,6 +1,7 @@ SUMMARY = "eudev is a fork of systemd's udev" HOMEPAGE = "https://wiki.gentoo.org/wiki/Eudev" -LICENSE = "GPLv2.0+" +LICENSE = "GPLv2.0+ & LGPL-2.1+" +LICENSE_libudev = "LGPL-2.1+" LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe" DEPENDS = "glib-2.0 glib-2.0-native gperf-native kmod libxslt-native util-linux" diff --git a/import-layers/yocto-poky/meta/recipes-core/util-linux/util-linux.inc b/import-layers/yocto-poky/meta/recipes-core/util-linux/util-linux.inc index c6355021f..70cba6b59 100644 --- a/import-layers/yocto-poky/meta/recipes-core/util-linux/util-linux.inc +++ b/import-layers/yocto-poky/meta/recipes-core/util-linux/util-linux.inc @@ -271,9 +271,11 @@ BBCLASSEXTEND = "native nativesdk" python do_package_prepend () { if '--enable-su' in d.getVar('EXTRA_OECONF', True).split(): - alt_name = "su" - d.setVarFlag('ALTERNATIVE_LINK_NAME', alt_name, '%s/%s' % (d.getVar('base_bindir', True), alt_name)) - d.appendVar('ALTERNATIVE_%s' % (d.getVar('PN', True)), ' ' + alt_name) + d.appendVar(d.expand('ALTERNATIVE_${PN}'), ' su') + d.appendVar(d.expand('ALTERNATIVE_${PN}-doc'), ' su.1') + + d.setVarFlag('ALTERNATIVE_LINK_NAME', "su", d.expand('${base_bindir}/su')) + d.setVarFlag('ALTERNATIVE_LINK_NAME', "su.1", d.expand('${mandir}/man1/su.1')) } python populate_packages_prepend() { diff --git a/import-layers/yocto-poky/meta/recipes-core/volatile-binds/volatile-binds.bb b/import-layers/yocto-poky/meta/recipes-core/volatile-binds/volatile-binds.bb index fee7275e3..130ab55f0 100644 --- a/import-layers/yocto-poky/meta/recipes-core/volatile-binds/volatile-binds.bb +++ b/import-layers/yocto-poky/meta/recipes-core/volatile-binds/volatile-binds.bb @@ -17,6 +17,9 @@ REQUIRED_DISTRO_FEATURES = "systemd" VOLATILE_BINDS ?= "\ /var/volatile/lib /var/lib\n\ + /var/volatile/cache /var/cache\n\ + /var/volatile/spool /var/spool\n\ + /var/volatile/srv /srv\n\ " VOLATILE_BINDS[type] = "list" VOLATILE_BINDS[separator] = "\n" @@ -67,5 +70,11 @@ do_install () { for service in ${SYSTEMD_SERVICE_volatile-binds}; do install -m 0644 $service ${D}${systemd_unitdir}/system/ done + + # Suppress attempts to process some tmpfiles that are not temporary. 
+ # + install -d ${D}${sysconfdir}/tmpfiles.d ${D}/var/cache + ln -s /dev/null ${D}${sysconfdir}/tmpfiles.d/etc.conf + ln -s /dev/null ${D}${sysconfdir}/tmpfiles.d/home.conf } do_install[dirs] = "${WORKDIR}" diff --git a/import-layers/yocto-poky/meta/recipes-core/zlib/zlib_1.2.8.bb b/import-layers/yocto-poky/meta/recipes-core/zlib/zlib_1.2.8.bb index 9470adb1b..913c7033d 100644 --- a/import-layers/yocto-poky/meta/recipes-core/zlib/zlib_1.2.8.bb +++ b/import-layers/yocto-poky/meta/recipes-core/zlib/zlib_1.2.8.bb @@ -6,7 +6,7 @@ SECTION = "libs" LICENSE = "Zlib" LIC_FILES_CHKSUM = "file://zlib.h;beginline=4;endline=23;md5=fde612df1e5933c428b73844a0c494fd" -SRC_URI = "http://www.zlib.net/${BPN}-${PV}.tar.xz \ +SRC_URI = "${SOURCEFORGE_MIRROR}/libpng/${BPN}/${PV}/${BPN}-${PV}.tar.xz \ file://remove.ldconfig.call.patch \ file://Makefile-runtests.patch \ file://ldflags-tests.patch \ diff --git a/import-layers/yocto-poky/meta/recipes-devtools/automake/automake/0001-automake-port-to-Perl-5.22-and-later.patch b/import-layers/yocto-poky/meta/recipes-devtools/automake/automake/0001-automake-port-to-Perl-5.22-and-later.patch new file mode 100644 index 000000000..0e6895fb4 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/automake/automake/0001-automake-port-to-Perl-5.22-and-later.patch @@ -0,0 +1,32 @@ +From 13f00eb4493c217269b76614759e452d8302955e Mon Sep 17 00:00:00 2001 +From: Paul Eggert <eggert@cs.ucla.edu> +Date: Thu, 31 Mar 2016 16:35:29 -0700 +Subject: [PATCH] automake: port to Perl 5.22 and later + +Without this change, Perl 5.22 complains "Unescaped left brace in +regex is deprecated" and this is planned to become a hard error in +Perl 5.26. See: +http://search.cpan.org/dist/perl-5.22.0/pod/perldelta.pod#A_literal_%22{%22_should_now_be_escaped_in_a_pattern +* bin/automake.in (substitute_ac_subst_variables): Escape left brace. 
+ +Upstream-Status: Backport [13f00eb4493c217269b76614759e452d8302955e] +--- + bin/automake.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/bin/automake.in b/bin/automake.in +index a3a0aa318..2c8f31e14 100644 +--- a/bin/automake.in ++++ b/bin/automake.in +@@ -3878,7 +3878,7 @@ sub substitute_ac_subst_variables_worker + sub substitute_ac_subst_variables + { + my ($text) = @_; +- $text =~ s/\${([^ \t=:+{}]+)}/substitute_ac_subst_variables_worker ($1)/ge; ++ $text =~ s/\$[{]([^ \t=:+{}]+)}/substitute_ac_subst_variables_worker ($1)/ge; + return $text; + } + +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/automake/automake_1.15.bb b/import-layers/yocto-poky/meta/recipes-devtools/automake/automake_1.15.bb index a3c72fd33..61ae3ba3e 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/automake/automake_1.15.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/automake/automake_1.15.bb @@ -21,7 +21,9 @@ RDEPENDS_${PN}_class-native = "autoconf-native hostperl-runtime-native" SRC_URI += " file://python-libdir.patch \ file://buildtest.patch \ file://performance.patch \ - file://new_rt_path_for_test-driver.patch" + file://new_rt_path_for_test-driver.patch \ + file://0001-automake-port-to-Perl-5.22-and-later.patch \ + " SRC_URI[md5sum] = "716946a105ca228ab545fc37a70df3a3" SRC_URI[sha256sum] = "7946e945a96e28152ba5a6beb0625ca715c6e32ac55f2e353ef54def0c8ed924" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils-2.27.inc b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils-2.27.inc index af1420b24..0936d974d 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils-2.27.inc +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils-2.27.inc @@ -36,6 +36,15 @@ SRC_URI = "\ file://0014-libtool-remove-rpath.patch \ file://0015-binutils-mips-gas-pic-relax-linkonce.diff \ file://0015-Refine-.cfi_sections-check-to-only-consider-compact-.patch \ + file://0016-Fix-seg-fault-in-ARM-linker-when-trying-to-parse-a-b.patch \ + file://0017-Fix-the-generation-of-alignment-frags-in-code-sectio.patch \ + file://0001-ppc-apuinfo-for-spe-parsed-incorrectly.patch \ + file://CVE-2017-6965.patch \ + file://CVE-2017-6966.patch \ + file://CVE-2017-6969.patch \ + file://CVE-2017-6969_2.patch \ + file://CVE-2017-7209.patch \ + file://CVE-2017-7210.patch \ " S = "${WORKDIR}/git" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0001-ppc-apuinfo-for-spe-parsed-incorrectly.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0001-ppc-apuinfo-for-spe-parsed-incorrectly.patch new file mode 100644 index 000000000..d82a0b694 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0001-ppc-apuinfo-for-spe-parsed-incorrectly.patch @@ -0,0 +1,37 @@ +From 8941017bc0226b60ce306d5271df15820ce66a53 Mon Sep 17 00:00:00 2001 +From: Alan Modra <amodra@gmail.com> +Date: Tue, 30 Aug 2016 20:57:32 +0930 +Subject: [PATCH] ppc apuinfo for spe parsed incorrectly +Organization: O.S. Systems Software LTDA. + +apuinfo saying SPE resulted in mach = bfd_mach_ppc_vle due to a +missing break. + + PR 20531 + * elf32-ppc.c (_bfd_elf_ppc_set_arch): Add missing "break". 
+ + +Backport from : +https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;a=commitdiff;h=8941017b + +Upstream-Status: Backport +Signed-off-by: Fabio Berton <fabio.berton@ossystems.com.br> +--- + bfd/elf32-ppc.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/bfd/elf32-ppc.c b/bfd/elf32-ppc.c +index 95ce1dc..e42ef1c 100644 +--- a/bfd/elf32-ppc.c ++++ b/bfd/elf32-ppc.c +@@ -2246,6 +2246,7 @@ _bfd_elf_ppc_set_arch (bfd *abfd) + case PPC_APUINFO_BRLOCK: + if (mach != bfd_mach_ppc_vle) + mach = bfd_mach_ppc_e500; ++ break; + + case PPC_APUINFO_VLE: + mach = bfd_mach_ppc_vle; +-- +2.1.4 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0016-Fix-seg-fault-in-ARM-linker-when-trying-to-parse-a-b.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0016-Fix-seg-fault-in-ARM-linker-when-trying-to-parse-a-b.patch new file mode 100644 index 000000000..33bf1e8f6 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0016-Fix-seg-fault-in-ARM-linker-when-trying-to-parse-a-b.patch @@ -0,0 +1,31 @@ +From 72b09de92cc597c53b1d762882b67a17fe56846c Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Tue, 23 Aug 2016 09:45:11 +0100 +Subject: [PATCH 16/16] Fix seg-fault in ARM linker when trying to parse a + binary file. + + * elf32-arm.c (elf32_arm_count_additional_relocs): Return zero if + there is no arm data associated with the section. +--- +Upstream-Status: Backport +Signed-off-by: Khem Raj <raj.khem@gmail.com> + + bfd/elf32-arm.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c +index 700bec3..3fab609 100644 +--- a/bfd/elf32-arm.c ++++ b/bfd/elf32-arm.c +@@ -18207,7 +18207,7 @@ elf32_arm_count_additional_relocs (asection *sec) + { + struct _arm_elf_section_data *arm_data; + arm_data = get_arm_elf_section_data (sec); +- return arm_data->additional_reloc_count; ++ return arm_data == NULL ? 0 : arm_data->additional_reloc_count; + } + + /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which +-- +2.10.1 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0017-Fix-the-generation-of-alignment-frags-in-code-sectio.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0017-Fix-the-generation-of-alignment-frags-in-code-sectio.patch new file mode 100644 index 000000000..f8b46be69 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/0017-Fix-the-generation-of-alignment-frags-in-code-sectio.patch @@ -0,0 +1,139 @@ +From 4a4286465b5d6c28968bc2b29ae08daca7f219a3 Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Fri, 18 Nov 2016 11:42:48 -0800 +Subject: [PATCH] Fix the generation of alignment frags in code sections for AArch64. + +PR gas/20364 +* config/tc-aarch64.c (s_ltorg): Change the mapping state after +aligning the frag. +(aarch64_init): Treat rs_align frags in code sections as +containing code, not data. +* testsuite/gas/aarch64/pr20364.s: New test. +* testsuite/gas/aarch64/pr20364.d: New test driver. 
+ +Backporting the patch from binutils mainline +https://sourceware.org/git/gitweb.cgi?p=binutils-gdb.git;h=7ea12e5c3ad54da440c08f32da09534e63e515ca + +Upstream-Status: Backport + +Signed-off-by: Manjukumar Matha <manjukumar.harthikote-matha@xilinx.com> +--- + gas/ChangeLog | 10 ++++++++++ + gas/config/tc-aarch64.c | 10 +++++++--- + gas/testsuite/gas/aarch64/pr20364.d | 13 +++++++++++++ + gas/testsuite/gas/aarch64/pr20364.s | 28 ++++++++++++++++++++++++++++ + 4 files changed, 58 insertions(+), 3 deletions(-) + create mode 100644 gas/testsuite/gas/aarch64/pr20364.d + create mode 100644 gas/testsuite/gas/aarch64/pr20364.s + +diff --git a/gas/ChangeLog b/gas/ChangeLog +index a39895a..fad06dc 100644 +--- a/gas/ChangeLog ++++ b/gas/ChangeLog +@@ -1,3 +1,13 @@ ++2016-08-05 Nick Clifton <nickc@redhat.com> ++ ++ PR gas/20364 ++ * config/tc-aarch64.c (s_ltorg): Change the mapping state after ++ aligning the frag. ++ (aarch64_init): Treat rs_align frags in code sections as ++ containing code, not data. ++ * testsuite/gas/aarch64/pr20364.s: New test. ++ * testsuite/gas/aarch64/pr20364.d: New test driver. ++ + 2016-08-03 Tristan Gingold <gingold@adacore.com> + + * configure: Regenerate. +diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c +index ddc40f2..74933cb 100644 +--- a/gas/config/tc-aarch64.c ++++ b/gas/config/tc-aarch64.c +@@ -1736,13 +1736,13 @@ s_ltorg (int ignored ATTRIBUTE_UNUSED) + if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0) + continue; + +- mapping_state (MAP_DATA); +- + /* Align pool as you have word accesses. + Only make a frag if we have to. */ + if (!need_pass_2) + frag_align (align, 0, 0); + ++ mapping_state (MAP_DATA); ++ + record_alignment (now_seg, align); + + sprintf (sym_name, "$$lit_\002%x", pool->id); +@@ -6373,11 +6373,15 @@ aarch64_init_frag (fragS * fragP, int max_chars) + + switch (fragP->fr_type) + { +- case rs_align: + case rs_align_test: + case rs_fill: + mapping_state_2 (MAP_DATA, max_chars); + break; ++ case rs_align: ++ /* PR 20364: We can get alignment frags in code sections, ++ so do not just assume that we should use the MAP_DATA state. */ ++ mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars); ++ break; + case rs_align_code: + mapping_state_2 (MAP_INSN, max_chars); + break; +diff --git a/gas/testsuite/gas/aarch64/pr20364.d b/gas/testsuite/gas/aarch64/pr20364.d +new file mode 100644 +index 0000000..babcff1 +--- /dev/null ++++ b/gas/testsuite/gas/aarch64/pr20364.d +@@ -0,0 +1,13 @@ ++# Check that ".align <size>, <fill>" does not set the mapping state to DATA, causing unnecessary frag generation. ++#name: PR20364 ++#objdump: -d ++ ++.*: file format .* ++ ++Disassembly of section \.vectors: ++ ++0+000 <.*>: ++ 0: d2800000 mov x0, #0x0 // #0 ++ 4: 94000000 bl 0 <plat_report_exception> ++ 8: 17fffffe b 0 <bl1_exceptions> ++ +diff --git a/gas/testsuite/gas/aarch64/pr20364.s b/gas/testsuite/gas/aarch64/pr20364.s +new file mode 100644 +index 0000000..594ad7c +--- /dev/null ++++ b/gas/testsuite/gas/aarch64/pr20364.s +@@ -0,0 +1,28 @@ ++ .macro vector_base label ++ .section .vectors, "ax" ++ .align 11, 0 ++ \label: ++ .endm ++ ++ .macro vector_entry label ++ .section .vectors, "ax" ++ .align 7, 0 ++ \label: ++ .endm ++ ++ .macro check_vector_size since ++ .if (. 
- \since) > (32 * 4) ++ .error "Vector exceeds 32 instructions" ++ .endif ++ .endm ++ ++ .globl bl1_exceptions ++ ++vector_base bl1_exceptions ++ ++vector_entry SynchronousExceptionSP0 ++ mov x0, #0x0 ++ bl plat_report_exception ++ b SynchronousExceptionSP0 ++ check_vector_size SynchronousExceptionSP0 ++ +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6965.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6965.patch new file mode 100644 index 000000000..85f7f98fe --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6965.patch @@ -0,0 +1,127 @@ +From 6f898c17b1d6f6a29a05ca6de31f0fc8f52cfbfe Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Mon, 13 Feb 2017 13:08:32 +0000 +Subject: [PATCH 1/2] Fix readelf writing to illegal addresses whilst + processing corrupt input files containing symbol-difference relocations. + + PR binutils/21137 + * readelf.c (target_specific_reloc_handling): Add end parameter. + Check for buffer overflow before writing relocated values. + (apply_relocations): Pass end to target_specific_reloc_handling. + +(cherry pick from commit 03f7786e2f440b9892b1c34a58fb26222ce1b493) +Upstream-Status: Backport [master] +CVE: CVE-2017-6965 + +Signed-off-by: Yuanjie Huang <yuanjie.huang@windriver.com> +--- + binutils/ChangeLog | 7 +++++++ + binutils/readelf.c | 30 +++++++++++++++++++++++++----- + 2 files changed, 32 insertions(+), 5 deletions(-) + +diff --git a/binutils/ChangeLog b/binutils/ChangeLog +index 995de87dc3..154b797a29 100644 +--- a/binutils/ChangeLog ++++ b/binutils/ChangeLog +@@ -5,6 +5,13 @@ + Check for buffer overflow before writing relocated values. + (apply_relocations): Pass end to target_specific_reloc_handling. + ++2017-02-13 Nick Clifton <nickc@redhat.com> ++ ++ PR binutils/21137 ++ * readelf.c (target_specific_reloc_handling): Add end parameter. ++ Check for buffer overflow before writing relocated values. ++ (apply_relocations): Pass end to target_specific_reloc_handling. ++ + 2016-08-03 Tristan Gingold <gingold@adacore.com> + + * configure: Regenerate. +diff --git a/binutils/readelf.c b/binutils/readelf.c +index d31558c3b4..220671f76f 100644 +--- a/binutils/readelf.c ++++ b/binutils/readelf.c +@@ -11345,6 +11345,7 @@ process_syminfo (FILE * file ATTRIBUTE_UNUSED) + static bfd_boolean + target_specific_reloc_handling (Elf_Internal_Rela * reloc, + unsigned char * start, ++ unsigned char * end, + Elf_Internal_Sym * symtab) + { + unsigned int reloc_type = get_reloc_type (reloc->r_info); +@@ -11384,13 +11385,19 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + handle_sym_diff: + if (saved_sym != NULL) + { ++ int reloc_size = reloc_type == 1 ? 4 : 2; + bfd_vma value; + + value = reloc->r_addend + + (symtab[get_reloc_symindex (reloc->r_info)].st_value + - saved_sym->st_value); + +- byte_put (start + reloc->r_offset, value, reloc_type == 1 ? 4 : 2); ++ if (start + reloc->r_offset + reloc_size >= end) ++ /* PR 21137 */ ++ error (_("MSP430 sym diff reloc writes past end of section (%p vs %p)\n"), ++ start + reloc->r_offset + reloc_size, end); ++ else ++ byte_put (start + reloc->r_offset, value, reloc_size); + + saved_sym = NULL; + return TRUE; +@@ -11421,13 +11428,18 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + case 2: /* R_MN10300_16 */ + if (saved_sym != NULL) + { ++ int reloc_size = reloc_type == 1 ? 
4 : 2; + bfd_vma value; + + value = reloc->r_addend + + (symtab[get_reloc_symindex (reloc->r_info)].st_value + - saved_sym->st_value); + +- byte_put (start + reloc->r_offset, value, reloc_type == 1 ? 4 : 2); ++ if (start + reloc->r_offset + reloc_size >= end) ++ error (_("MN10300 sym diff reloc writes past end of section (%p vs %p)\n"), ++ start + reloc->r_offset + reloc_size, end); ++ else ++ byte_put (start + reloc->r_offset, value, reloc_size); + + saved_sym = NULL; + return TRUE; +@@ -11462,12 +11474,20 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + break; + + case 0x41: /* R_RL78_ABS32. */ +- byte_put (start + reloc->r_offset, value, 4); ++ if (start + reloc->r_offset + 4 >= end) ++ error (_("RL78 sym diff reloc writes past end of section (%p vs %p)\n"), ++ start + reloc->r_offset + 2, end); ++ else ++ byte_put (start + reloc->r_offset, value, 4); + value = 0; + return TRUE; + + case 0x43: /* R_RL78_ABS16. */ +- byte_put (start + reloc->r_offset, value, 2); ++ if (start + reloc->r_offset + 2 >= end) ++ error (_("RL78 sym diff reloc writes past end of section (%p vs %p)\n"), ++ start + reloc->r_offset + 2, end); ++ else ++ byte_put (start + reloc->r_offset, value, 2); + value = 0; + return TRUE; + +@@ -12074,7 +12094,7 @@ apply_relocations (void * file, + + reloc_type = get_reloc_type (rp->r_info); + +- if (target_specific_reloc_handling (rp, start, symtab)) ++ if (target_specific_reloc_handling (rp, start, end, symtab)) + continue; + else if (is_none_reloc (reloc_type)) + continue; +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6966.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6966.patch new file mode 100644 index 000000000..5e364ef69 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6966.patch @@ -0,0 +1,240 @@ +From 310e2cdc0a46ef62602097f5c21c393571e76df4 Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Mon, 13 Feb 2017 14:03:22 +0000 +Subject: [PATCH 2/2] Fix read-after-free error in readelf when processing + multiple, relocated sections in an MSP430 binary. + + PR binutils/21139 + * readelf.c (target_specific_reloc_handling): Add num_syms + parameter. Check for symbol table overflow before accessing + symbol value. If reloc pointer is NULL, discard all saved state. + (apply_relocations): Pass num_syms to target_specific_reloc_handling. + Call target_specific_reloc_handling with a NULL reloc pointer + after processing all of the relocs. + +(cherry pick from commit f84ce13b6708801ca1d6289b7c4003e2f5a6d7f9) +Upstream-Status: Backport [master] +CVE: CVE-2017-6966 + +Signed-off-by: Yuanjie Huang <yuanjie.huang@windriver.com> +--- + binutils/ChangeLog | 10 +++++ + binutils/readelf.c | 109 +++++++++++++++++++++++++++++++++++++++++------------ + 2 files changed, 94 insertions(+), 25 deletions(-) + +diff --git a/binutils/ChangeLog b/binutils/ChangeLog +index 154b797a29..aef0a51f19 100644 +--- a/binutils/ChangeLog ++++ b/binutils/ChangeLog +@@ -1,5 +1,15 @@ + 2017-02-13 Nick Clifton <nickc@redhat.com> + ++ PR binutils/21139 ++ * readelf.c (target_specific_reloc_handling): Add num_syms ++ parameter. Check for symbol table overflow before accessing ++ symbol value. If reloc pointer is NULL, discard all saved state. ++ (apply_relocations): Pass num_syms to target_specific_reloc_handling. ++ Call target_specific_reloc_handling with a NULL reloc pointer ++ after processing all of the relocs. 
++ ++2017-02-13 Nick Clifton <nickc@redhat.com> ++ + PR binutils/21137 + * readelf.c (target_specific_reloc_handling): Add end parameter. + Check for buffer overflow before writing relocated values. +diff --git a/binutils/readelf.c b/binutils/readelf.c +index 220671f76f..2b6cef1638 100644 +--- a/binutils/readelf.c ++++ b/binutils/readelf.c +@@ -11340,15 +11340,27 @@ process_syminfo (FILE * file ATTRIBUTE_UNUSED) + + /* Check to see if the given reloc needs to be handled in a target specific + manner. If so then process the reloc and return TRUE otherwise return +- FALSE. */ ++ FALSE. ++ ++ If called with reloc == NULL, then this is a signal that reloc processing ++ for the current section has finished, and any saved state should be ++ discarded. */ + + static bfd_boolean + target_specific_reloc_handling (Elf_Internal_Rela * reloc, + unsigned char * start, + unsigned char * end, +- Elf_Internal_Sym * symtab) ++ Elf_Internal_Sym * symtab, ++ unsigned long num_syms) + { +- unsigned int reloc_type = get_reloc_type (reloc->r_info); ++ unsigned int reloc_type = 0; ++ unsigned long sym_index = 0; ++ ++ if (reloc) ++ { ++ reloc_type = get_reloc_type (reloc->r_info); ++ sym_index = get_reloc_symindex (reloc->r_info); ++ } + + switch (elf_header.e_machine) + { +@@ -11357,13 +11369,24 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + { + static Elf_Internal_Sym * saved_sym = NULL; + ++ if (reloc == NULL) ++ { ++ saved_sym = NULL; ++ return TRUE; ++ } ++ + switch (reloc_type) + { + case 10: /* R_MSP430_SYM_DIFF */ + if (uses_msp430x_relocs ()) + break; + case 21: /* R_MSP430X_SYM_DIFF */ +- saved_sym = symtab + get_reloc_symindex (reloc->r_info); ++ /* PR 21139. */ ++ if (sym_index >= num_syms) ++ error (_("MSP430 SYM_DIFF reloc contains invalid symbol index %lu\n"), ++ sym_index); ++ else ++ saved_sym = symtab + sym_index; + return TRUE; + + case 1: /* R_MSP430_32 or R_MSP430_ABS32 */ +@@ -11388,16 +11411,21 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + int reloc_size = reloc_type == 1 ? 
4 : 2; + bfd_vma value; + +- value = reloc->r_addend +- + (symtab[get_reloc_symindex (reloc->r_info)].st_value +- - saved_sym->st_value); +- +- if (start + reloc->r_offset + reloc_size >= end) +- /* PR 21137 */ +- error (_("MSP430 sym diff reloc writes past end of section (%p vs %p)\n"), +- start + reloc->r_offset + reloc_size, end); ++ if (sym_index >= num_syms) ++ error (_("MSP430 reloc contains invalid symbol index %lu\n"), ++ sym_index); + else +- byte_put (start + reloc->r_offset, value, reloc_size); ++ { ++ value = reloc->r_addend + (symtab[sym_index].st_value ++ - saved_sym->st_value); ++ ++ if (start + reloc->r_offset + reloc_size >= end) ++ /* PR 21137 */ ++ error (_("MSP430 sym diff reloc writes past end of section (%p vs %p)\n"), ++ start + reloc->r_offset + reloc_size, end); ++ else ++ byte_put (start + reloc->r_offset, value, reloc_size); ++ } + + saved_sym = NULL; + return TRUE; +@@ -11417,13 +11445,24 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + { + static Elf_Internal_Sym * saved_sym = NULL; + ++ if (reloc == NULL) ++ { ++ saved_sym = NULL; ++ return TRUE; ++ } ++ + switch (reloc_type) + { + case 34: /* R_MN10300_ALIGN */ + return TRUE; + case 33: /* R_MN10300_SYM_DIFF */ +- saved_sym = symtab + get_reloc_symindex (reloc->r_info); ++ if (sym_index >= num_syms) ++ error (_("MN10300_SYM_DIFF reloc contains invalid symbol index %lu\n"), ++ sym_index); ++ else ++ saved_sym = symtab + sym_index; + return TRUE; ++ + case 1: /* R_MN10300_32 */ + case 2: /* R_MN10300_16 */ + if (saved_sym != NULL) +@@ -11431,15 +11470,20 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + int reloc_size = reloc_type == 1 ? 4 : 2; + bfd_vma value; + +- value = reloc->r_addend +- + (symtab[get_reloc_symindex (reloc->r_info)].st_value +- - saved_sym->st_value); +- +- if (start + reloc->r_offset + reloc_size >= end) +- error (_("MN10300 sym diff reloc writes past end of section (%p vs %p)\n"), +- start + reloc->r_offset + reloc_size, end); ++ if (sym_index >= num_syms) ++ error (_("MN10300 reloc contains invalid symbol index %lu\n"), ++ sym_index); + else +- byte_put (start + reloc->r_offset, value, reloc_size); ++ { ++ value = reloc->r_addend + (symtab[sym_index].st_value ++ - saved_sym->st_value); ++ ++ if (start + reloc->r_offset + reloc_size >= end) ++ error (_("MN10300 sym diff reloc writes past end of section (%p vs %p)\n"), ++ start + reloc->r_offset + reloc_size, end); ++ else ++ byte_put (start + reloc->r_offset, value, reloc_size); ++ } + + saved_sym = NULL; + return TRUE; +@@ -11459,12 +11503,24 @@ target_specific_reloc_handling (Elf_Internal_Rela * reloc, + static bfd_vma saved_sym2 = 0; + static bfd_vma value; + ++ if (reloc == NULL) ++ { ++ saved_sym1 = saved_sym2 = 0; ++ return TRUE; ++ } ++ + switch (reloc_type) + { + case 0x80: /* R_RL78_SYM. */ + saved_sym1 = saved_sym2; +- saved_sym2 = symtab[get_reloc_symindex (reloc->r_info)].st_value; +- saved_sym2 += reloc->r_addend; ++ if (sym_index >= num_syms) ++ error (_("RL78_SYM reloc contains invalid symbol index %lu\n"), ++ sym_index); ++ else ++ { ++ saved_sym2 = symtab[sym_index].st_value; ++ saved_sym2 += reloc->r_addend; ++ } + return TRUE; + + case 0x83: /* R_RL78_OPsub. 
*/ +@@ -12094,7 +12150,7 @@ apply_relocations (void * file, + + reloc_type = get_reloc_type (rp->r_info); + +- if (target_specific_reloc_handling (rp, start, end, symtab)) ++ if (target_specific_reloc_handling (rp, start, end, symtab, num_syms)) + continue; + else if (is_none_reloc (reloc_type)) + continue; +@@ -12190,6 +12246,9 @@ apply_relocations (void * file, + } + + free (symtab); ++ /* Let the target specific reloc processing code know that ++ we have finished with these relocs. */ ++ target_specific_reloc_handling (NULL, NULL, NULL, NULL, 0); + + if (relocs_return) + { +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6969.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6969.patch new file mode 100644 index 000000000..3d036c4cf --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6969.patch @@ -0,0 +1,56 @@ +From 489246368e2c49a795ad5ecbc8895cbc854292fa Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Fri, 17 Feb 2017 15:59:45 +0000 +Subject: Fix illegal memory accesses in readelf when parsing a corrupt binary. + + PR binutils/21156 + * readelf.c (find_section_in_set): Test for invalid section + indicies. + +CVE: CVE-2017-6969 +Upstream-Status: Backport [master] + +Signed-off-by: Yuanjie Huang <yuanjie.huang@windriver.com> +--- + binutils/ChangeLog | 6 ++++++ + binutils/readelf.c | 10 ++++++++-- + 2 files changed, 14 insertions(+), 2 deletions(-) + +diff --git a/binutils/ChangeLog b/binutils/ChangeLog +index a70bdb7a7b..dbf8eb079e 100644 +--- a/binutils/ChangeLog ++++ b/binutils/ChangeLog +@@ -1,3 +1,9 @@ ++2017-02-17 Nick Clifton <nickc@redhat.com> ++ ++ PR binutils/21156 ++ * readelf.c (find_section_in_set): Test for invalid section ++ indicies. ++ + 2016-08-03 Tristan Gingold <gingold@adacore.com> + + * configure: Regenerate. +diff --git a/binutils/readelf.c b/binutils/readelf.c +index d31558c3b4..7f7365dbc5 100644 +--- a/binutils/readelf.c ++++ b/binutils/readelf.c +@@ -674,8 +674,14 @@ find_section_in_set (const char * name, unsigned int * set) + if (set != NULL) + { + while ((i = *set++) > 0) +- if (streq (SECTION_NAME (section_headers + i), name)) +- return section_headers + i; ++ { ++ /* See PR 21156 for a reproducer. */ ++ if (i >= elf_header.e_shnum) ++ continue; /* FIXME: Should we issue an error message ? */ ++ ++ if (streq (SECTION_NAME (section_headers + i), name)) ++ return section_headers + i; ++ } + } + + return find_section (name); +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6969_2.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6969_2.patch new file mode 100644 index 000000000..491c7086e --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-6969_2.patch @@ -0,0 +1,122 @@ +From 59fcd64fe65a89fb0acaf5463840310701189375 Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Mon, 20 Feb 2017 14:40:39 +0000 +Subject: Fix another memory access error in readelf when parsing a corrupt + binary. + + PR binutils/21156 + * dwarf.c (cu_tu_indexes_read): Move into... + (load_cu_tu_indexes): ... here. Change the variable into + tri-state. Change the function into boolean, returning + false if the indicies could not be loaded. + (find_cu_tu_set): Return NULL if the indicies could not be + loaded. 
+ +CVE: CVE-2017-6969 +Upstream-Status: Backport [master] + +Signed-off-by: Yuanjie Huang <yuanjie.huang@windriver.com> +--- + binutils/ChangeLog | 10 ++++++++++ + binutils/dwarf.c | 34 ++++++++++++++++++++-------------- + 2 files changed, 30 insertions(+), 14 deletions(-) + +diff --git a/binutils/ChangeLog b/binutils/ChangeLog +index dbf8eb079e..55d2f8ba40 100644 +--- a/binutils/ChangeLog ++++ b/binutils/ChangeLog +@@ -1,3 +1,13 @@ ++2017-02-20 Nick Clifton <nickc@redhat.com> ++ ++ PR binutils/21156 ++ * dwarf.c (cu_tu_indexes_read): Move into... ++ (load_cu_tu_indexes): ... here. Change the variable into ++ tri-state. Change the function into boolean, returning ++ false if the indicies could not be loaded. ++ (find_cu_tu_set): Return NULL if the indicies could not be ++ loaded. ++ + 2017-02-17 Nick Clifton <nickc@redhat.com> + + PR binutils/21156 +diff --git a/binutils/dwarf.c b/binutils/dwarf.c +index 282e069958..a23267feb6 100644 +--- a/binutils/dwarf.c ++++ b/binutils/dwarf.c +@@ -76,7 +76,6 @@ int dwarf_check = 0; + as a zero-terminated list of section indexes comprising one set of debug + sections from a .dwo file. */ + +-static int cu_tu_indexes_read = 0; + static unsigned int *shndx_pool = NULL; + static unsigned int shndx_pool_size = 0; + static unsigned int shndx_pool_used = 0; +@@ -99,7 +98,7 @@ static int tu_count = 0; + static struct cu_tu_set *cu_sets = NULL; + static struct cu_tu_set *tu_sets = NULL; + +-static void load_cu_tu_indexes (void *file); ++static bfd_boolean load_cu_tu_indexes (void *); + + /* Values for do_debug_lines. */ + #define FLAG_DEBUG_LINES_RAW 1 +@@ -2713,7 +2712,7 @@ load_debug_info (void * file) + return num_debug_info_entries; + + /* If this is a DWARF package file, load the CU and TU indexes. */ +- load_cu_tu_indexes (file); ++ (void) load_cu_tu_indexes (file); + + if (load_debug_section (info, file) + && process_debug_info (&debug_displays [info].section, file, abbrev, 1, 0)) +@@ -7302,21 +7301,27 @@ process_cu_tu_index (struct dwarf_section *section, int do_display) + section sets that we can use to associate a .debug_info.dwo section + with its associated .debug_abbrev.dwo section in a .dwp file. */ + +-static void ++static bfd_boolean + load_cu_tu_indexes (void *file) + { ++ static int cu_tu_indexes_read = -1; /* Tri-state variable. */ ++ + /* If we have already loaded (or tried to load) the CU and TU indexes + then do not bother to repeat the task. */ +- if (cu_tu_indexes_read) +- return; +- +- if (load_debug_section (dwp_cu_index, file)) +- process_cu_tu_index (&debug_displays [dwp_cu_index].section, 0); +- +- if (load_debug_section (dwp_tu_index, file)) +- process_cu_tu_index (&debug_displays [dwp_tu_index].section, 0); ++ if (cu_tu_indexes_read == -1) ++ { ++ cu_tu_indexes_read = TRUE; ++ ++ if (load_debug_section (dwp_cu_index, file)) ++ if (! process_cu_tu_index (&debug_displays [dwp_cu_index].section, 0)) ++ cu_tu_indexes_read = FALSE; ++ ++ if (load_debug_section (dwp_tu_index, file)) ++ if (! process_cu_tu_index (&debug_displays [dwp_tu_index].section, 0)) ++ cu_tu_indexes_read = FALSE; ++ } + +- cu_tu_indexes_read = 1; ++ return (bfd_boolean) cu_tu_indexes_read; + } + + /* Find the set of sections that includes section SHNDX. */ +@@ -7326,7 +7331,8 @@ find_cu_tu_set (void *file, unsigned int shndx) + { + unsigned int i; + +- load_cu_tu_indexes (file); ++ if (! load_cu_tu_indexes (file)) ++ return NULL; + + /* Find SHNDX in the shndx pool. 
*/ + for (i = 0; i < shndx_pool_used; i++) +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-7209.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-7209.patch new file mode 100644 index 000000000..336d72cfe --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-7209.patch @@ -0,0 +1,63 @@ +From 6e5e9d96b5bd7dc3147db9917d6a7a20682915cc Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Mon, 13 Feb 2017 15:04:37 +0000 +Subject: Fix invalid read of section contents whilst processing a corrupt + binary. + + PR binutils/21135 + * readelf.c (dump_section_as_bytes): Handle the case where + uncompress_section_contents returns false. + +CVE: CVE-2017-7209 +Upstream-Status: Backport[master] + +Signed-off-by: Yuanjie Huang <yuanjie.huang@windriver.com> +--- + binutils/ChangeLog | 6 ++++++ + binutils/readelf.c | 16 ++++++++++++---- + 2 files changed, 18 insertions(+), 4 deletions(-) + +diff --git a/binutils/ChangeLog b/binutils/ChangeLog +index 55d2f8ba40..c4d8e60eca 100644 +--- a/binutils/ChangeLog ++++ b/binutils/ChangeLog +@@ -1,3 +1,9 @@ ++2017-02-13 Nick Clifton <nickc@redhat.com> ++ ++ PR binutils/21135 ++ * readelf.c (dump_section_as_bytes): Handle the case where ++ uncompress_section_contents returns false. ++ + 2017-02-20 Nick Clifton <nickc@redhat.com> + + PR binutils/21156 +diff --git a/binutils/readelf.c b/binutils/readelf.c +index 7f7365dbc5..bc4e92fa81 100644 +--- a/binutils/readelf.c ++++ b/binutils/readelf.c +@@ -12473,10 +12473,18 @@ dump_section_as_bytes (Elf_Internal_Shdr * section, + new_size -= 12; + } + +- if (uncompressed_size +- && uncompress_section_contents (& start, uncompressed_size, +- & new_size)) +- section_size = new_size; ++ if (uncompressed_size) ++ { ++ if (uncompress_section_contents (& start, uncompressed_size, ++ & new_size)) ++ section_size = new_size; ++ else ++ { ++ error (_("Unable to decompress section %s\n"), ++ printable_section_name (section)); ++ return; ++ } ++ } + } + + if (relocate) +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-7210.patch b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-7210.patch new file mode 100644 index 000000000..211d2bfd8 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/binutils/binutils/CVE-2017-7210.patch @@ -0,0 +1,71 @@ +From 80958b04c91edcd41c42807225a7ad1b2a4ce0e6 Mon Sep 17 00:00:00 2001 +From: Nick Clifton <nickc@redhat.com> +Date: Tue, 14 Feb 2017 14:07:29 +0000 +Subject: Fix handling of corrupt STABS enum type strings. + + PR binutils/21157 + * stabs.c (parse_stab_enum_type): Check for corrupt NAME:VALUE + pairs. + (parse_number): Exit early if passed an empty string. + +CVE: CVE-2017-7210 +Upstream-Status: Backport [master] + +Signed-off-by: Yuanjie Huang <yuanjie.huang@windriver.com> +--- + binutils/ChangeLog | 7 +++++++ + binutils/stabs.c | 14 +++++++++++++- + 2 files changed, 20 insertions(+), 1 deletion(-) + +diff --git a/binutils/ChangeLog b/binutils/ChangeLog +index c4d8e60eca..2bae9ec587 100644 +--- a/binutils/ChangeLog ++++ b/binutils/ChangeLog +@@ -1,3 +1,10 @@ ++2017-02-14 Nick Clifton <nickc@redhat.com> ++ ++ PR binutils/21157 ++ * stabs.c (parse_stab_enum_type): Check for corrupt NAME:VALUE ++ pairs. ++ (parse_number): Exit early if passed an empty string. 
++ + 2017-02-13 Nick Clifton <nickc@redhat.com> + + PR binutils/21135 +diff --git a/binutils/stabs.c b/binutils/stabs.c +index aebde7afe9..c425afe98e 100644 +--- a/binutils/stabs.c ++++ b/binutils/stabs.c +@@ -232,6 +232,10 @@ parse_number (const char **pp, bfd_boolean *poverflow) + + orig = *pp; + ++ /* Stop early if we are passed an empty string. */ ++ if (*orig == 0) ++ return (bfd_vma) 0; ++ + errno = 0; + ul = strtoul (*pp, (char **) pp, 0); + if (ul + 1 != 0 || errno == 0) +@@ -1975,9 +1979,17 @@ parse_stab_enum_type (void *dhandle, const char **pp) + bfd_signed_vma val; + + p = *pp; +- while (*p != ':') ++ while (*p != ':' && *p != 0) + ++p; + ++ if (*p == 0) ++ { ++ bad_stab (orig); ++ free (names); ++ free (values); ++ return DEBUG_TYPE_NULL; ++ } ++ + name = savestring (*pp, p - *pp); + + *pp = p + 1; +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/docbook-utils/docbook-utils-native_0.6.14.bb b/import-layers/yocto-poky/meta/recipes-devtools/docbook-utils/docbook-utils-native_0.6.14.bb index c3a5f3b42..44b43a810 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/docbook-utils/docbook-utils-native_0.6.14.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/docbook-utils/docbook-utils-native_0.6.14.bb @@ -10,7 +10,7 @@ DEPENDS = "openjade-native sgmlspl-native docbook-dsssl-stylesheets-native docbo PR = "r3" SRC_URI = "\ - ftp://sources.redhat.com/pub/docbook-tools/new-trials/SOURCES/docbook-utils-${PV}.tar.gz \ + http://ftp.osuosl.org/pub/blfs/conglomeration/docbook-utils/docbook-utils-${PV}.tar.gz \ file://re.patch \ " diff --git a/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-exit-with-exit-status-0-if-no-errors-were-fix.patch b/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-exit-with-exit-status-0-if-no-errors-were-fix.patch new file mode 100644 index 000000000..44f3888b1 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/0001-e2fsck-exit-with-exit-status-0-if-no-errors-were-fix.patch @@ -0,0 +1,285 @@ +From b9bb77a0dd712f06b262a12766972b99cd801269 Mon Sep 17 00:00:00 2001 +From: Theodore Ts'o <tytso@mit.edu> +Date: Thu, 16 Feb 2017 22:02:35 -0500 +Subject: [PATCH] e2fsck: exit with exit status 0 if no errors were fixed + +Previously, e2fsck would exit with a status code of 1 even though the +only changes that it made to the file system were various +optimziations and not fixing file system corruption. Since the man +page states that an exit status of 1 means "file system errors +corrupted", fix e2fsck to return an exit status of 0. 
+ +Upstream-Status: Backport + +Signed-off-by: Theodore Ts'o <tytso@mit.edu> +Signed-off-by: Daniel Schultz <d.schultz@phytec.de> + +Conflicts: + e2fsck/e2fsck.conf.5.in +--- + e2fsck/e2fsck.conf.5.in | 34 +++++++++++++++++++++++++++++ + e2fsck/journal.c | 1 + + e2fsck/problem.c | 8 ++++--- + e2fsck/problemP.h | 1 + + e2fsck/unix.c | 20 +++++++++++++---- + tests/f_collapse_extent_tree/expect.1 | 2 +- + tests/f_compress_extent_tree_level/expect.1 | 2 +- + tests/f_convert_bmap/expect.1 | 2 +- + tests/f_convert_bmap_and_extent/expect.1 | 2 +- + tests/f_extent_htree/expect.1 | 2 +- + tests/f_jnl_errno/expect.1 | 2 +- + tests/f_journal/expect.1 | 2 +- + tests/f_orphan/expect.1 | 2 +- + tests/f_orphan_extents_inode/expect.1 | 2 +- + tests/f_rehash_dir/expect.1 | 2 +- + tests/f_unsorted_EAs/expect.1 | 2 +- + 16 files changed, 68 insertions(+), 18 deletions(-) + +diff --git a/e2fsck/e2fsck.conf.5.in b/e2fsck/e2fsck.conf.5.in +index 1f80a04..6a205ce 100644 +--- a/e2fsck/e2fsck.conf.5.in ++++ b/e2fsck/e2fsck.conf.5.in +@@ -326,6 +326,40 @@ defaults to true. + This relation controls whether or not the scratch file directory is used + instead of an in-memory data structure when tracking inode counts. It + defaults to true. ++.TP ++.I not_a_fix ++This boolean option, it set to true, marks the problem as ++one where if the user gives permission to make the requested change, ++it does not mean that the file system had a problem which has since ++been fixed. This is used for requests to optimize the file system's ++data structure, such as pruning an extent tree. ++@TDB_MAN_COMMENT@.SH THE [scratch_files] STANZA ++@TDB_MAN_COMMENT@The following relations are defined in the ++@TDB_MAN_COMMENT@.I [scratch_files] ++@TDB_MAN_COMMENT@stanza. ++@TDB_MAN_COMMENT@.TP ++@TDB_MAN_COMMENT@.I directory ++@TDB_MAN_COMMENT@If the directory named by this relation exists and is ++@TDB_MAN_COMMENT@writeable, then e2fsck will attempt to use this ++@TDB_MAN_COMMENT@directory to store scratch files instead of using ++@TDB_MAN_COMMENT@in-memory data structures. ++@TDB_MAN_COMMENT@.TP ++@TDB_MAN_COMMENT@.I numdirs_threshold ++@TDB_MAN_COMMENT@If this relation is set, then in-memory data structures ++@TDB_MAN_COMMENT@be used if the number of directories in the filesystem ++@TDB_MAN_COMMENT@are fewer than amount specified. ++@TDB_MAN_COMMENT@.TP ++@TDB_MAN_COMMENT@.I dirinfo ++@TDB_MAN_COMMENT@This relation controls whether or not the scratch file ++@TDB_MAN_COMMENT@directory is used instead of an in-memory data ++@TDB_MAN_COMMENT@structure for directory information. It defaults to ++@TDB_MAN_COMMENT@true. ++@TDB_MAN_COMMENT@.TP ++@TDB_MAN_COMMENT@.I icount ++@TDB_MAN_COMMENT@This relation controls whether or not the scratch file ++@TDB_MAN_COMMENT@directory is used instead of an in-memory data ++@TDB_MAN_COMMENT@structure when tracking inode counts. It defaults to ++@TDB_MAN_COMMENT@true. 
+ .SH LOGGING + E2fsck has the facility to save the information from an e2fsck run in a + directory so that a system administrator can review its output at their +diff --git a/e2fsck/journal.c b/e2fsck/journal.c +index c8ac57d..b4cf329 100644 +--- a/e2fsck/journal.c ++++ b/e2fsck/journal.c +@@ -572,6 +572,7 @@ static void clear_v2_journal_fields(journal_t *journal) + if (!fix_problem(ctx, PR_0_CLEAR_V2_JOURNAL, &pctx)) + return; + ++ ctx->flags |= E2F_FLAG_PROBLEMS_FIXED; + memset(((char *) journal->j_superblock) + V1_SB_SIZE, 0, + ctx->fs->blocksize-V1_SB_SIZE); + mark_buffer_dirty(journal->j_sb_buffer); +diff --git a/e2fsck/problem.c b/e2fsck/problem.c +index 1e645e4..2b01ffc 100644 +--- a/e2fsck/problem.c ++++ b/e2fsck/problem.c +@@ -1261,12 +1261,12 @@ static struct e2fsck_problem problem_table[] = { + /* Inode extent tree could be shorter */ + { PR_1E_CAN_COLLAPSE_EXTENT_TREE, + N_("@i %i @x tree (at level %b) could be shorter. "), +- PROMPT_FIX, PR_NO_OK | PR_PREEN_NO | PR_PREEN_OK }, ++ PROMPT_FIX, PR_NO_OK | PR_PREEN_NO | PR_PREEN_OK | PR_NOT_A_FIX }, + + /* Inode extent tree could be narrower */ + { PR_1E_CAN_NARROW_EXTENT_TREE, + N_("@i %i @x tree (at level %b) could be narrower. "), +- PROMPT_FIX, PR_NO_OK | PR_PREEN_NO | PR_PREEN_OK }, ++ PROMPT_FIX, PR_NO_OK | PR_PREEN_NO | PR_PREEN_OK | PR_NOT_A_FIX }, + + /* Pass 2 errors */ + +@@ -2146,6 +2146,7 @@ int fix_problem(e2fsck_t ctx, problem_t code, struct problem_context *pctx) + reconfigure_bool(ctx, ptr, key, PR_NO_NOMSG, "no_nomsg"); + reconfigure_bool(ctx, ptr, key, PR_PREEN_NOHDR, "preen_noheader"); + reconfigure_bool(ctx, ptr, key, PR_FORCE_NO, "force_no"); ++ reconfigure_bool(ctx, ptr, key, PR_NOT_A_FIX, "not_a_fix"); + profile_get_integer(ctx->profile, "options", + "max_count_problems", 0, 0, + &ptr->max_count); +@@ -2263,7 +2264,8 @@ int fix_problem(e2fsck_t ctx, problem_t code, struct problem_context *pctx) + if (ptr->flags & PR_AFTER_CODE) + answer = fix_problem(ctx, ptr->second_code, pctx); + +- if (answer && (ptr->prompt != PROMPT_NONE)) ++ if (answer && (ptr->prompt != PROMPT_NONE) && ++ !(ptr->flags & PR_NOT_A_FIX)) + ctx->flags |= E2F_FLAG_PROBLEMS_FIXED; + + return answer; +diff --git a/e2fsck/problemP.h b/e2fsck/problemP.h +index 7944cd6..63bb8df 100644 +--- a/e2fsck/problemP.h ++++ b/e2fsck/problemP.h +@@ -44,3 +44,4 @@ struct latch_descr { + #define PR_CONFIG 0x080000 /* This problem has been customized + from the config file */ + #define PR_FORCE_NO 0x100000 /* Force the answer to be no */ ++#define PR_NOT_A_FIX 0x200000 /* Yes doesn't mean a problem was fixed */ +diff --git a/e2fsck/unix.c b/e2fsck/unix.c +index 004a6e5..d33d7fd 100644 +--- a/e2fsck/unix.c ++++ b/e2fsck/unix.c +@@ -1896,11 +1896,23 @@ no_journal: + fix_problem(ctx, PR_6_IO_FLUSH, &pctx); + + if (was_changed) { +- exit_value |= FSCK_NONDESTRUCT; +- if (!(ctx->options & E2F_OPT_PREEN)) +- log_out(ctx, _("\n%s: ***** FILE SYSTEM WAS " +- "MODIFIED *****\n"), ++ int fs_fixed = (ctx->flags & E2F_FLAG_PROBLEMS_FIXED); ++ ++ if (fs_fixed) ++ exit_value |= FSCK_NONDESTRUCT; ++ if (!(ctx->options & E2F_OPT_PREEN)) { ++#if 0 /* Do this later; it breaks too many tests' golden outputs */ ++ log_out(ctx, fs_fixed ? 
++ _("\n%s: ***** FILE SYSTEM ERRORS " ++ "CORRECTED *****\n") : ++ _("%s: File system was modified.\n"), + ctx->device_name); ++#else ++ log_out(ctx, ++ _("\n%s: ***** FILE SYSTEM WAS MODIFIED *****\n"), ++ ctx->device_name); ++#endif ++ } + if (ctx->mount_flags & EXT2_MF_ISROOT) { + log_out(ctx, _("%s: ***** REBOOT SYSTEM *****\n"), + ctx->device_name); +diff --git a/tests/f_collapse_extent_tree/expect.1 b/tests/f_collapse_extent_tree/expect.1 +index e2eb65e..8165a58 100644 +--- a/tests/f_collapse_extent_tree/expect.1 ++++ b/tests/f_collapse_extent_tree/expect.1 +@@ -13,4 +13,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 12/128 files (0.0% non-contiguous), 19/512 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_compress_extent_tree_level/expect.1 b/tests/f_compress_extent_tree_level/expect.1 +index a359c99..dd33f63 100644 +--- a/tests/f_compress_extent_tree_level/expect.1 ++++ b/tests/f_compress_extent_tree_level/expect.1 +@@ -20,4 +20,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 12/128 files (8.3% non-contiguous), 26/512 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_convert_bmap/expect.1 b/tests/f_convert_bmap/expect.1 +index 7d2ca86..c387962 100644 +--- a/tests/f_convert_bmap/expect.1 ++++ b/tests/f_convert_bmap/expect.1 +@@ -23,4 +23,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 12/128 files (8.3% non-contiguous), 570/2048 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_convert_bmap_and_extent/expect.1 b/tests/f_convert_bmap_and_extent/expect.1 +index 7af91aa..c86c571 100644 +--- a/tests/f_convert_bmap_and_extent/expect.1 ++++ b/tests/f_convert_bmap_and_extent/expect.1 +@@ -30,4 +30,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 13/128 files (15.4% non-contiguous), 574/2048 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_extent_htree/expect.1 b/tests/f_extent_htree/expect.1 +index 223ca69..ea48405 100644 +--- a/tests/f_extent_htree/expect.1 ++++ b/tests/f_extent_htree/expect.1 +@@ -26,4 +26,4 @@ test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + 0 sockets + ------------ + 343 files +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_jnl_errno/expect.1 b/tests/f_jnl_errno/expect.1 +index c572951..4134234 100644 +--- a/tests/f_jnl_errno/expect.1 ++++ b/tests/f_jnl_errno/expect.1 +@@ -6,4 +6,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 11/2048 files (9.1% non-contiguous), 1330/8192 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_journal/expect.1 b/tests/f_journal/expect.1 +index a202c80..0a18654 100644 +--- a/tests/f_journal/expect.1 ++++ b/tests/f_journal/expect.1 +@@ -59,4 +59,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 53/2048 files (1.9% non-contiguous), 1409/8192 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_orphan/expect.1 b/tests/f_orphan/expect.1 +index eddc1f8..087ebee 100644 +--- a/tests/f_orphan/expect.1 ++++ b/tests/f_orphan/expect.1 +@@ -11,4 +11,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 12/2048 files (0.0% non-contiguous), 1303/8192 blocks 
+-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_orphan_extents_inode/expect.1 b/tests/f_orphan_extents_inode/expect.1 +index 2eaab78..5d713b3 100644 +--- a/tests/f_orphan_extents_inode/expect.1 ++++ b/tests/f_orphan_extents_inode/expect.1 +@@ -7,4 +7,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 12/16 files (0.0% non-contiguous), 21/100 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_rehash_dir/expect.1 b/tests/f_rehash_dir/expect.1 +index 6076765..c1449ba 100644 +--- a/tests/f_rehash_dir/expect.1 ++++ b/tests/f_rehash_dir/expect.1 +@@ -7,4 +7,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 105/2048 files (2.9% non-contiguous), 336/512 blocks +-Exit status is 1 ++Exit status is 0 +diff --git a/tests/f_unsorted_EAs/expect.1 b/tests/f_unsorted_EAs/expect.1 +index 7d588d7..64b9045 100644 +--- a/tests/f_unsorted_EAs/expect.1 ++++ b/tests/f_unsorted_EAs/expect.1 +@@ -8,4 +8,4 @@ Pass 5: Checking group summary information + + test_filesys: ***** FILE SYSTEM WAS MODIFIED ***** + test_filesys: 12/2048 files (0.0% non-contiguous), 1294/2048 blocks +-Exit status is 1 ++Exit status is 0 +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/mkdir_p.patch b/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/mkdir_p.patch new file mode 100644 index 000000000..b0fa4b8cc --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs/mkdir_p.patch @@ -0,0 +1,20 @@ +e2fsprogs: expand @mkdir_p@ + +Add AC_SUBST to configure.ac. @mkdir_p@ is currently +not expanded so no locale data is written into usr/share/locale. + +Upstream-Status: Pending + +Signed-off-by: Joe Slater <jslater@windriver.com> + +--- a/configure.ac ++++ b/configure.ac +@@ -811,6 +811,8 @@ AC_SUBST(PACKAGE) + AC_SUBST(VERSION) + + AM_GNU_GETTEXT ++dnl @MKDIR_P@ is expanded in AM_GNU_GETTEXT ++AC_SUBST([mkdir_p],['$(MKDIR_P)']) + dnl + dnl End of configuration options + dnl diff --git a/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.43.bb b/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.43.bb index f4855bc43..dcfb564a4 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.43.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/e2fsprogs/e2fsprogs_1.43.bb @@ -9,6 +9,8 @@ SRC_URI += "file://acinclude.m4 \ file://ptest.patch \ file://mkdir.patch \ file://Revert-mke2fs-enable-the-metadata_csum-and-64bit-fea.patch \ + file://mkdir_p.patch \ + file://0001-e2fsck-exit-with-exit-status-0-if-no-errors-were-fix.patch \ " SRC_URI_append_class-native = " file://e2fsprogs-fix-missing-check-for-permission-denied.patch" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/0001-Add-GCC7-Wimplicit-fallthrough-support-fixes.patch b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/0001-Add-GCC7-Wimplicit-fallthrough-support-fixes.patch new file mode 100644 index 000000000..a240323f3 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/0001-Add-GCC7-Wimplicit-fallthrough-support-fixes.patch @@ -0,0 +1,318 @@ +From 09949994e76eea3c1230a5c88ffa8fdf588b120f Mon Sep 17 00:00:00 2001 +From: Mark Wielaard <mjw@redhat.com> +Date: Wed, 2 Nov 2016 13:29:26 +0100 +Subject: [PATCH] Add GCC7 -Wimplicit-fallthrough support/fixes. 
+ +GCC7 will have a new -Wimplicit-fallthrough warning. It did catch one +small buglet in elflint option procession. So it seems useful to enable +to make sure all swatch case fallthroughs are deliberate. + +Add configure check to detect whether gcc support -Wimplicit-fallthrough +and enable it. Add fixes and explicit fallthrough comments where necessary. + +Signed-off-by: Mark Wielaard <mjw@redhat.com> + +Upstream-Status: Backport +Upstream-Commit: a3cc8182b2ae05290b0eafa74b70746d7befc0e4 +--- + backends/alpha_retval.c | 4 +--- + backends/i386_regs.c | 1 + + backends/i386_retval.c | 3 +-- + backends/linux-core-note.c | 4 ++-- + backends/ppc_regs.c | 2 +- + backends/x86_64_regs.c | 1 + + config/eu.am | 8 +++++++- + configure.ac | 10 ++++++++++ + libcpu/i386_disasm.c | 2 +- + libdw/cfi.c | 2 ++ + libdw/encoded-value.h | 1 + + libdwfl/dwfl_report_elf.c | 2 +- + src/addr2line.c | 1 + + src/elfcompress.c | 3 ++- + src/elflint.c | 4 +++- + src/objdump.c | 4 +++- + tests/backtrace-data.c | 1 + + tests/backtrace.c | 2 +- + 18 files changed, 40 insertions(+), 15 deletions(-) + +diff --git a/backends/alpha_retval.c b/backends/alpha_retval.c +index 53dbfa45..7232b462 100644 +--- a/backends/alpha_retval.c ++++ b/backends/alpha_retval.c +@@ -130,9 +130,7 @@ alpha_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) + return nloc_intreg; + } + } +- +- /* Else fall through. */ +- ++ /* Fallthrough */ + case DW_TAG_structure_type: + case DW_TAG_class_type: + case DW_TAG_union_type: +diff --git a/backends/i386_regs.c b/backends/i386_regs.c +index fb8ded33..fd963a62 100644 +--- a/backends/i386_regs.c ++++ b/backends/i386_regs.c +@@ -92,6 +92,7 @@ i386_register_info (Ebl *ebl __attribute__ ((unused)), + case 5: + case 8: + *type = DW_ATE_address; ++ /* Fallthrough */ + case 0 ... 3: + case 6 ... 7: + name[0] = 'e'; +diff --git a/backends/i386_retval.c b/backends/i386_retval.c +index 9da797d5..4aa646fe 100644 +--- a/backends/i386_retval.c ++++ b/backends/i386_retval.c +@@ -122,9 +122,8 @@ i386_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) + return nloc_intreg; + if (size <= 8) + return nloc_intregpair; +- +- /* Else fall through. */ + } ++ /* Fallthrough */ + + case DW_TAG_structure_type: + case DW_TAG_class_type: +diff --git a/backends/linux-core-note.c b/backends/linux-core-note.c +index ff2b226f..321721f3 100644 +--- a/backends/linux-core-note.c ++++ b/backends/linux-core-note.c +@@ -219,8 +219,8 @@ EBLHOOK(core_note) (const GElf_Nhdr *nhdr, const char *name, + case sizeof "CORE": + if (memcmp (name, "CORE", nhdr->n_namesz) == 0) + break; +- /* Buggy old Linux kernels didn't terminate "LINUX". +- Fall through. */ ++ /* Buggy old Linux kernels didn't terminate "LINUX". */ ++ /* Fall through. */ + + case sizeof "LINUX": + if (memcmp (name, "LINUX", nhdr->n_namesz) == 0) +diff --git a/backends/ppc_regs.c b/backends/ppc_regs.c +index 4b92a9aa..bcf4f7a3 100644 +--- a/backends/ppc_regs.c ++++ b/backends/ppc_regs.c +@@ -140,7 +140,7 @@ ppc_register_info (Ebl *ebl __attribute__ ((unused)), + case 100: + if (*bits == 32) + return stpcpy (name, "mq") + 1 - name; +- ++ /* Fallthrough */ + case 102 ... 107: + name[0] = 's'; + name[1] = 'p'; +diff --git a/backends/x86_64_regs.c b/backends/x86_64_regs.c +index 2172d9f1..84304407 100644 +--- a/backends/x86_64_regs.c ++++ b/backends/x86_64_regs.c +@@ -87,6 +87,7 @@ x86_64_register_info (Ebl *ebl __attribute__ ((unused)), + + case 6 ... 7: + *type = DW_ATE_address; ++ /* Fallthrough */ + case 0 ... 
5: + name[0] = 'r'; + name[1] = baseregs[regno][0]; +diff --git a/config/eu.am b/config/eu.am +index 4998771d..8fe1e259 100644 +--- a/config/eu.am ++++ b/config/eu.am +@@ -61,10 +61,16 @@ else + NULL_DEREFERENCE_WARNING= + endif + ++if HAVE_IMPLICIT_FALLTHROUGH_WARNING ++IMPLICIT_FALLTHROUGH_WARNING=-Wimplicit-fallthrough ++else ++IMPLICIT_FALLTHROUGH_WARNING= ++endif ++ + AM_CFLAGS = -std=gnu99 -Wall -Wshadow -Wformat=2 \ + -Wold-style-definition -Wstrict-prototypes \ + $(LOGICAL_OP_WARNING) $(DUPLICATED_COND_WARNING) \ +- $(NULL_DEREFERENCE_WARNING) \ ++ $(NULL_DEREFERENCE_WARNING) $(IMPLICIT_FALLTHROUGH_WARNING) \ + $(if $($(*F)_no_Werror),,-Werror) \ + $(if $($(*F)_no_Wunused),,-Wunused -Wextra) \ + $(if $($(*F)_no_Wstack_usage),,$(STACK_USAGE_WARNING)) \ +diff --git a/configure.ac b/configure.ac +index 86a69c66..35850c64 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -336,6 +336,16 @@ CFLAGS="$old_CFLAGS"]) + AM_CONDITIONAL(HAVE_NULL_DEREFERENCE_WARNING, + [test "x$ac_cv_null_dereference" != "xno"]) + ++# -Wimplicit-fallthrough was added by GCC7 ++AC_CACHE_CHECK([whether gcc accepts -Wimplicit-fallthrough], ac_cv_implicit_fallthrough, [dnl ++old_CFLAGS="$CFLAGS" ++CFLAGS="$CFLAGS -Wimplicit-fallthrough -Werror" ++AC_COMPILE_IFELSE([AC_LANG_SOURCE([])], ++ ac_cv_implicit_fallthrough=yes, ac_cv_implicit_fallthrough=no) ++CFLAGS="$old_CFLAGS"]) ++AM_CONDITIONAL(HAVE_IMPLICIT_FALLTHROUGH_WARNING, ++ [test "x$ac_cv_implicit_fallthrough" != "xno"]) ++ + dnl Check if we have argp available from our libc + AC_LINK_IFELSE( + [AC_LANG_PROGRAM( +diff --git a/libcpu/i386_disasm.c b/libcpu/i386_disasm.c +index 832241f2..1a584635 100644 +--- a/libcpu/i386_disasm.c ++++ b/libcpu/i386_disasm.c +@@ -819,7 +819,7 @@ i386_disasm (const uint8_t **startp, const uint8_t *end, GElf_Addr addr, + ++param_start; + break; + } +- ++ /* Fallthrough */ + default: + assert (! "INVALID not handled"); + } +diff --git a/libdw/cfi.c b/libdw/cfi.c +index 1fd668d7..daa845f3 100644 +--- a/libdw/cfi.c ++++ b/libdw/cfi.c +@@ -138,6 +138,7 @@ execute_cfi (Dwarf_CFI *cache, + + case DW_CFA_advance_loc1: + operand = *program++; ++ /* Fallthrough */ + case DW_CFA_advance_loc + 0 ... DW_CFA_advance_loc + CFI_PRIMARY_MAX: + advance_loc: + loc += operand * cie->code_alignment_factor; +@@ -300,6 +301,7 @@ execute_cfi (Dwarf_CFI *cache, + + case DW_CFA_restore_extended: + get_uleb128 (operand, program, end); ++ /* Fallthrough */ + case DW_CFA_restore + 0 ... DW_CFA_restore + CFI_PRIMARY_MAX: + + if (unlikely (abi_cfi) && likely (opcode == DW_CFA_restore)) +diff --git a/libdw/encoded-value.h b/libdw/encoded-value.h +index 48d868fb..f0df4cec 100644 +--- a/libdw/encoded-value.h ++++ b/libdw/encoded-value.h +@@ -64,6 +64,7 @@ encoded_value_size (const Elf_Data *data, const unsigned char e_ident[], + if (*end++ & 0x80u) + return end - p; + } ++ return 0; + + default: + return 0; +diff --git a/libdwfl/dwfl_report_elf.c b/libdwfl/dwfl_report_elf.c +index 1c6e401d..73a5511a 100644 +--- a/libdwfl/dwfl_report_elf.c ++++ b/libdwfl/dwfl_report_elf.c +@@ -170,7 +170,7 @@ __libdwfl_elf_address_range (Elf *elf, GElf_Addr base, bool add_p_vaddr, + /* An assigned base address is meaningless for these. */ + base = 0; + add_p_vaddr = true; +- ++ /* Fallthrough. 
*/ + case ET_DYN: + default:; + size_t phnum; +diff --git a/src/addr2line.c b/src/addr2line.c +index 0ce854f6..bea24aea 100644 +--- a/src/addr2line.c ++++ b/src/addr2line.c +@@ -632,6 +632,7 @@ handle_address (const char *string, Dwfl *dwfl) + case 1: + addr = 0; + j = i; ++ /* Fallthrough */ + case 2: + if (string[j] != '\0') + break; +diff --git a/src/elfcompress.c b/src/elfcompress.c +index d0ca469c..57afa116 100644 +--- a/src/elfcompress.c ++++ b/src/elfcompress.c +@@ -153,7 +153,8 @@ parse_opt (int key, char *arg __attribute__ ((unused)), + argp_error (state, + N_("Only one input file allowed together with '-o'")); + /* We only use this for checking the number of arguments, we don't +- actually want to consume them, so fallthrough. */ ++ actually want to consume them. */ ++ /* Fallthrough */ + default: + return ARGP_ERR_UNKNOWN; + } +diff --git a/src/elflint.c b/src/elflint.c +index 15b12f6f..2c45fcb8 100644 +--- a/src/elflint.c ++++ b/src/elflint.c +@@ -210,6 +210,7 @@ parse_opt (int key, char *arg __attribute__ ((unused)), + + case 'd': + is_debuginfo = true; ++ break; + + case ARGP_gnuld: + gnuld = true; +@@ -3963,6 +3964,7 @@ section [%2zu] '%s': merge flag set but entry size is zero\n"), + case SHT_NOBITS: + if (is_debuginfo) + break; ++ /* Fallthrough */ + default: + ERROR (gettext ("\ + section [%2zu] '%s' has unexpected type %d for an executable section\n"), +@@ -4305,7 +4307,7 @@ section [%2d] '%s': unknown core file note type %" PRIu32 + if (nhdr.n_namesz == sizeof "Linux" + && !memcmp (data->d_buf + name_offset, "Linux", sizeof "Linux")) + break; +- ++ /* Fallthrough */ + default: + if (shndx == 0) + ERROR (gettext ("\ +diff --git a/src/objdump.c b/src/objdump.c +index 0aa41e89..94e9e021 100644 +--- a/src/objdump.c ++++ b/src/objdump.c +@@ -234,7 +234,9 @@ parse_opt (int key, char *arg, + program_invocation_short_name); + exit (EXIT_FAILURE); + } +- ++ /* We only use this for checking the number of arguments, we don't ++ actually want to consume them. */ ++ /* Fallthrough */ + default: + return ARGP_ERR_UNKNOWN; + } +diff --git a/tests/backtrace-data.c b/tests/backtrace-data.c +index bc5ceba0..b7158dae 100644 +--- a/tests/backtrace-data.c ++++ b/tests/backtrace-data.c +@@ -250,6 +250,7 @@ thread_callback (Dwfl_Thread *thread, void *thread_arg __attribute__ ((unused))) + break; + case -1: + error (1, 0, "dwfl_thread_getframes: %s", dwfl_errmsg (-1)); ++ break; + default: + abort (); + } +diff --git a/tests/backtrace.c b/tests/backtrace.c +index 12476430..bf5995b4 100644 +--- a/tests/backtrace.c ++++ b/tests/backtrace.c +@@ -123,7 +123,7 @@ callback_verify (pid_t tid, unsigned frameno, Dwarf_Addr pc, + assert (symname2 == NULL || strcmp (symname2, "jmp") != 0); + break; + } +- /* PASSTHRU */ ++ /* FALLTHRU */ + case 4: + assert (symname != NULL && strcmp (symname, "stdarg") == 0); + break; +-- +2.13.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/0001-ar-Fix-GCC7-Wformat-length-issues.patch b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/0001-ar-Fix-GCC7-Wformat-length-issues.patch new file mode 100644 index 000000000..346547678 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/0001-ar-Fix-GCC7-Wformat-length-issues.patch @@ -0,0 +1,125 @@ +From f090883ca61f0bf0f979c5b26d4e1a69e805156e Mon Sep 17 00:00:00 2001 +From: Mark Wielaard <mjw@redhat.com> +Date: Thu, 10 Nov 2016 18:45:02 +0100 +Subject: [PATCH] ar: Fix GCC7 -Wformat-length issues. 
+ +GCC7 adds warnings for snprintf formatting into too small buffers. +Fix the two issues pointed out by the new warning. The ar header +fields are fixed length containing left-justified strings without +zero terminator. snprintf always adds a '\0' char at the end (which +we then don't copy into the ar header field) and numbers are decimal +strings of fixed 10 chars (-Wformat-length thinks formatting +them as size_t might overflow the buffer on 64bit arches). + +Signed-off-by: Mark Wielaard <mjw@redhat.com> + +Upstream-Status: Backport +Upstream-Commit: d5afff85e22b38949f3e7936231c67de16e180e8 +--- + src/ar.c | 15 +++++++++++---- + src/arlib.c | 16 ++++++++++------ + 2 files changed, 21 insertions(+), 10 deletions(-) + +diff --git a/src/ar.c b/src/ar.c +index 1320d07b..f2160d35 100644 +--- a/src/ar.c ++++ b/src/ar.c +@@ -1,5 +1,5 @@ + /* Create, modify, and extract from archives. +- Copyright (C) 2005-2012 Red Hat, Inc. ++ Copyright (C) 2005-2012, 2016 Red Hat, Inc. + This file is part of elfutils. + Written by Ulrich Drepper <drepper@redhat.com>, 2005. + +@@ -853,7 +853,10 @@ write_member (struct armem *memb, off_t *startp, off_t *lenp, Elf *elf, + off_t end_off, int newfd) + { + struct ar_hdr arhdr; +- char tmpbuf[sizeof (arhdr.ar_name) + 1]; ++ /* The ar_name is not actually zero teminated, but we need that for ++ snprintf. Also if the name is too long, then the string starts ++ with '/' plus an index off number (decimal). */ ++ char tmpbuf[sizeof (arhdr.ar_name) + 2]; + + bool changed_header = memb->long_name_off != -1; + if (changed_header) +@@ -1455,7 +1458,11 @@ do_oper_insert (int oper, const char *arfname, char **argv, int argc, + + /* Create the header. */ + struct ar_hdr arhdr; +- char tmpbuf[sizeof (arhdr.ar_name) + 1]; ++ /* The ar_name is not actually zero teminated, but we ++ need that for snprintf. Also if the name is too ++ long, then the string starts with '/' plus an index ++ off number (decimal). */ ++ char tmpbuf[sizeof (arhdr.ar_name) + 2]; + if (all->long_name_off == -1) + { + size_t namelen = strlen (all->name); +@@ -1465,7 +1472,7 @@ do_oper_insert (int oper, const char *arfname, char **argv, int argc, + } + else + { +- snprintf (tmpbuf, sizeof (arhdr.ar_name) + 1, "/%-*ld", ++ snprintf (tmpbuf, sizeof (tmpbuf), "/%-*ld", + (int) sizeof (arhdr.ar_name), all->long_name_off); + memcpy (arhdr.ar_name, tmpbuf, sizeof (arhdr.ar_name)); + } +diff --git a/src/arlib.c b/src/arlib.c +index 43a9145b..0c2e4cde 100644 +--- a/src/arlib.c ++++ b/src/arlib.c +@@ -1,5 +1,5 @@ + /* Functions to handle creation of Linux archives. +- Copyright (C) 2007-2012 Red Hat, Inc. ++ Copyright (C) 2007-2012, 2016 Red Hat, Inc. + This file is part of elfutils. + Written by Ulrich Drepper <drepper@redhat.com>, 2007. + +@@ -23,6 +23,7 @@ + #include <assert.h> + #include <error.h> + #include <gelf.h> ++#include <inttypes.h> + #include <libintl.h> + #include <stdio.h> + #include <stdlib.h> +@@ -107,6 +108,9 @@ arlib_init (void) + void + arlib_finalize (void) + { ++ /* Note that the size is stored as decimal string in 10 chars, ++ without zero terminator (we add + 1 here only so snprintf can ++ put it at the end, we then don't use it when we memcpy it). 
*/ + char tmpbuf[sizeof (((struct ar_hdr *) NULL)->ar_size) + 1]; + + symtab.longnameslen = obstack_object_size (&symtab.longnamesob); +@@ -121,9 +125,9 @@ arlib_finalize (void) + + symtab.longnames = obstack_finish (&symtab.longnamesob); + +- int s = snprintf (tmpbuf, sizeof (tmpbuf), "%-*zu", ++ int s = snprintf (tmpbuf, sizeof (tmpbuf), "%-*" PRIu32 "", + (int) sizeof (((struct ar_hdr *) NULL)->ar_size), +- symtab.longnameslen - sizeof (struct ar_hdr)); ++ (uint32_t) (symtab.longnameslen - sizeof (struct ar_hdr))); + memcpy (&((struct ar_hdr *) symtab.longnames)->ar_size, tmpbuf, s); + } + +@@ -169,10 +173,10 @@ arlib_finalize (void) + + /* See comment for ar_date above. */ + memcpy (&((struct ar_hdr *) symtab.symsoff)->ar_size, tmpbuf, +- snprintf (tmpbuf, sizeof (tmpbuf), "%-*zu", ++ snprintf (tmpbuf, sizeof (tmpbuf), "%-*" PRIu32 "", + (int) sizeof (((struct ar_hdr *) NULL)->ar_size), +- symtab.symsofflen + symtab.symsnamelen +- - sizeof (struct ar_hdr))); ++ (uint32_t) (symtab.symsofflen + symtab.symsnamelen ++ - sizeof (struct ar_hdr)))); + } + + +-- +2.13.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/Fix_one_GCC7_warning.patch b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/Fix_one_GCC7_warning.patch new file mode 100644 index 000000000..25f5e1482 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/Fix_one_GCC7_warning.patch @@ -0,0 +1,45 @@ +From 8e2ab18b874d1fda06243ad00209d44e2992928a Mon Sep 17 00:00:00 2001 +From: Mark Wielaard <mark@klomp.org> +Date: Sun, 12 Feb 2017 21:51:34 +0100 +Subject: [PATCH 1/2] libasm: Fix one GCC7 -Wformat-truncation=2 warning. + +Make sure that if we have really lots of labels the tempsym doesn't get +truncated because it is too small to hold the whole name. + +This doesn't enable -Wformat-truncation=2 or fix other "issues" pointed +out by enabling this warning because there are currently some issues +with it. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79448 + +Signed-off-by: Mark Wielaard <mark@klomp.org> + +Upstream-Status: Backport (https://sourceware.org/git/?p=elfutils.git;a=commit;h=93c51144c3f664d4e9709da75a1d0fa00ea0fe95) +Signed-off-by: Joshua Lock <joshua.g.lock@intel.com> +--- + libasm/asm_newsym.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/libasm/asm_newsym.c b/libasm/asm_newsym.c +index 7f522910..76482bb2 100644 +--- a/libasm/asm_newsym.c ++++ b/libasm/asm_newsym.c +@@ -1,5 +1,5 @@ + /* Define new symbol for current position in given section. +- Copyright (C) 2002, 2005 Red Hat, Inc. ++ Copyright (C) 2002, 2005, 2017 Red Hat, Inc. + This file is part of elfutils. + Written by Ulrich Drepper <drepper@redhat.com>, 2002. + +@@ -44,7 +44,9 @@ AsmSym_t * + asm_newsym (AsmScn_t *asmscn, const char *name, GElf_Xword size, + int type, int binding) + { +-#define TEMPSYMLEN 10 ++/* We don't really expect labels with many digits, but in theory it could ++ be 10 digits (plus ".L" and a zero terminator). 
*/ ++#define TEMPSYMLEN 13 + char tempsym[TEMPSYMLEN]; + AsmSym_t *result; + +-- +2.13.0 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/fallthrough.patch b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/fallthrough.patch new file mode 100644 index 000000000..b2623f9d2 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils-0.166/fallthrough.patch @@ -0,0 +1,36 @@ +GCC7 adds -Wimplicit-fallthrough to warn when a switch case falls through, +however this causes warnings (which are promoted to errors) with the elfutils +patches from Debian for mips and parisc, which use fallthrough's by design. + +Explicitly mark the intentional fallthrough switch cases with a comment to +disable the warnings where the fallthrough behaviour is desired. + +Upstream-Status: Pending [debian] +Signed-off-by: Joshua Lock <joshua.g.lock@intel.com> + +Index: elfutils-0.168/backends/parisc_retval.c +=================================================================== +--- elfutils-0.168.orig/backends/parisc_retval.c ++++ elfutils-0.168/backends/parisc_retval.c +@@ -166,7 +166,7 @@ parisc_return_value_location_ (Dwarf_Die + return nloc_intregpair; + + /* Else fall through. */ +- } ++ } // fallthrough + + case DW_TAG_structure_type: + case DW_TAG_class_type: +Index: elfutils-0.168/backends/mips_retval.c +=================================================================== +--- elfutils-0.168.orig/backends/mips_retval.c ++++ elfutils-0.168/backends/mips_retval.c +@@ -387,7 +387,7 @@ mips_return_value_location (Dwarf_Die *f + else + return nloc_intregpair; + } +- } ++ } // fallthrough + + /* Fallthrough to handle large types */ + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.148.bb b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.148.bb index 6080c179f..d18b732fe 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.148.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.148.bb @@ -8,8 +8,7 @@ DEPENDS = "libtool bzip2 zlib virtual/libintl" PR = "r11" -SRC_URI = "https://fedorahosted.org/releases/e/l/${BPN}/${BP}.tar.bz2" - +SRC_URI = "ftp://sourceware.org/pub/elfutils/${PV}/${BP}.tar.bz2" SRC_URI[md5sum] = "a0bed1130135f17ad27533b0034dba8d" SRC_URI[sha256sum] = "8aebfa4a745db21cf5429c9541fe482729b62efc7e53e9110151b4169fe887da" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.166.bb b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.166.bb index 5c436d386..3593c1c1d 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.166.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/elfutils/elfutils_0.166.bb @@ -1,12 +1,12 @@ SUMMARY = "Utilities and libraries for handling compiled object files" -HOMEPAGE = "https://fedorahosted.org/elfutils" +HOMEPAGE = "https://sourceware.org/elfutils" SECTION = "base" LICENSE = "(GPLv3 & Elfutils-Exception)" LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" DEPENDS = "libtool bzip2 zlib virtual/libintl" DEPENDS_append_libc-musl = " argp-standalone fts " -SRC_URI = "https://fedorahosted.org/releases/e/l/elfutils/${PV}/${BP}.tar.bz2" +SRC_URI = "ftp://sourceware.org/pub/elfutils/${PV}/${BP}.tar.bz2" SRC_URI[md5sum] = "d4e462b7891915dc5326bccefa2024ff" SRC_URI[sha256sum] = "3c056914c8a438b210be0d790463b960fc79d234c3f05ce707cbff80e94cba30" @@ -18,7 +18,10 @@ SRC_URI += "\ 
file://0001-remove-the-unneed-checking.patch \ file://0001-fix-a-stack-usage-warning.patch \ file://aarch64_uio.patch \ + file://Fix_one_GCC7_warning.patch \ + file://0001-Add-GCC7-Wimplicit-fallthrough-support-fixes.patch \ file://shadow.patch \ + file://0001-ar-Fix-GCC7-Wformat-length-issues.patch \ " # pick the patch from debian @@ -37,6 +40,8 @@ SRC_URI += "\ file://uclibc-support.patch \ file://elfcmp-fix-self-comparision.patch \ " +# Fix the patches from Debian with GCC7 +SRC_URI += "file://fallthrough.patch" SRC_URI_append_libc-musl = " file://0001-build-Provide-alternatives-for-glibc-assumptions-hel.patch " # The buildsystem wants to generate 2 .h files from source using a binary it just built, diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gcc/gcc-6.2.inc b/import-layers/yocto-poky/meta/recipes-devtools/gcc/gcc-6.2.inc index b118995e1..39ae65380 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/gcc/gcc-6.2.inc +++ b/import-layers/yocto-poky/meta/recipes-devtools/gcc/gcc-6.2.inc @@ -80,8 +80,9 @@ SRC_URI = "\ file://0047-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch \ ${BACKPORTS} \ " -BACKPORTS = "" - +BACKPORTS = "\ + file://ubsan-fix-check-empty-string.patch \ +" SRC_URI[md5sum] = "9768625159663b300ae4de2f4745fcc4" SRC_URI[sha256sum] = "9944589fc722d3e66308c0ce5257788ebd7872982a718aa2516123940671b7c5" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gcc/gcc-6.2/ubsan-fix-check-empty-string.patch b/import-layers/yocto-poky/meta/recipes-devtools/gcc/gcc-6.2/ubsan-fix-check-empty-string.patch new file mode 100644 index 000000000..c0127198e --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/gcc/gcc-6.2/ubsan-fix-check-empty-string.patch @@ -0,0 +1,28 @@ +From 8db2cf6353c13f2a84cbe49b689654897906c499 Mon Sep 17 00:00:00 2001 +From: kyukhin <kyukhin@138bc75d-0d04-0410-961f-82ee72b054a4> +Date: Sat, 3 Sep 2016 10:57:05 +0000 +Subject: [PATCH] gcc/ * ubsan.c (ubsan_use_new_style_p): Fix check for empty + string. 
+ +git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@239971 138bc75d-0d04-0410-961f-82ee72b054a4 + +Upstream-Status: Backport +Signed-off-by: Joshua Lock <joshua.g.lock@intel.com> + +--- + gcc/ubsan.c | 2 +- + 2 files changed, 5 insertions(+), 1 deletion(-) + +Index: gcc-6.3.0/gcc/ubsan.c +=================================================================== +--- gcc-6.3.0.orig/gcc/ubsan.c ++++ gcc-6.3.0/gcc/ubsan.c +@@ -1471,7 +1471,7 @@ ubsan_use_new_style_p (location_t loc) + + expanded_location xloc = expand_location (loc); + if (xloc.file == NULL || strncmp (xloc.file, "\1", 2) == 0 +- || xloc.file == '\0' || xloc.file[0] == '\xff' ++ || xloc.file[0] == '\0' || xloc.file[0] == '\xff' + || xloc.file[1] == '\xff') + return false; + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gcc/libgcc-common.inc b/import-layers/yocto-poky/meta/recipes-devtools/gcc/libgcc-common.inc index 8a13f542c..c4de31c34 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/gcc/libgcc-common.inc +++ b/import-layers/yocto-poky/meta/recipes-devtools/gcc/libgcc-common.inc @@ -144,6 +144,9 @@ BASETARGET_SYS = "${@get_original_os(d)}" addtask extra_symlinks after do_multilib_install before do_package do_populate_sysroot fakeroot python do_extra_symlinks() { + if bb.data.inherits_class('nativesdk', d): + return + targetsys = d.getVar('BASETARGET_SYS', True) if targetsys != d.getVar('TARGET_SYS', True): diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-7.11.1.inc b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-7.11.1.inc new file mode 100644 index 000000000..d9dfe6f3f --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-7.11.1.inc @@ -0,0 +1,22 @@ +LICENSE = "GPLv2 & GPLv3 & LGPLv2 & LGPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \ + file://COPYING3;md5=d32239bcb673463ab874e80d47fae504 \ + file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6 \ + file://COPYING.LIB;md5=9f604d8a4f8e74f4f5140845a21b6674" + +SRC_URI = "http://ftp.gnu.org/gnu/gdb/gdb-${PV}.tar.xz \ + file://0001-include-sys-types.h-for-mode_t.patch \ + file://0002-make-man-install-relative-to-DESTDIR.patch \ + file://0003-mips-linux-nat-Define-_ABIO32-if-not-defined.patch \ + file://0004-ppc-ptrace-Define-pt_regs-uapi_pt_regs-on-GLIBC-syst.patch \ + file://0005-Add-support-for-Renesas-SH-sh4-architecture.patch \ + file://0006-Dont-disable-libreadline.a-when-using-disable-static.patch \ + file://0007-use-asm-sgidefs.h.patch \ + file://0008-Use-exorted-definitions-of-SIGRTMIN.patch \ + file://0009-Change-order-of-CFLAGS.patch \ + file://0010-resolve-restrict-keyword-conflict.patch \ + file://0011-avx_mpx.patch \ +" + +SRC_URI[md5sum] = "5aa71522e488e358243917967db87476" +SRC_URI[sha256sum] = "e9216da4e3755e9f414c1aa0026b626251dfc57ffe572a266e98da4f6988fc70" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-7.11.inc b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-7.11.inc deleted file mode 100644 index a9267d554..000000000 --- a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-7.11.inc +++ /dev/null @@ -1,9 +0,0 @@ -LICENSE = "GPLv2 & GPLv3 & LGPLv2 & LGPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \ - file://COPYING3;md5=d32239bcb673463ab874e80d47fae504 \ - file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6 \ - file://COPYING.LIB;md5=9f604d8a4f8e74f4f5140845a21b6674" - -SRC_URI[md5sum] = "b93a2721393e5fa226375b42d567d90b" -SRC_URI[sha256sum] = 
"ff14f8050e6484508c73cbfa63731e57901478490ca1672dc0b5e2b03f6af622" - diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-common.inc b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-common.inc index 09231434d..33a5ce983 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-common.inc +++ b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-common.inc @@ -1,6 +1,5 @@ SUMMARY = "GNU debugger" HOMEPAGE = "http://www.gnu.org/software/gdb/" -LICENSE = "GPLv3+" SECTION = "devel" DEPENDS = "expat zlib ncurses virtual/libiconv ${LTTNGUST}" @@ -16,33 +15,10 @@ LTTNGUST_mips64eln32 = "" LTTNGUST_sh4 = "" LTTNGUST_libc-musl = "" -LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \ - file://COPYING.LIB;md5=9f604d8a4f8e74f4f5140845a21b6674 \ - file://COPYING3;md5=d32239bcb673463ab874e80d47fae504 \ - file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6" - inherit autotools texinfo -SRCREV = "1a982b689ce4e20523bdf69e47fdd574c4f63934" - -SRC_URI = "git://sourceware.org/git/binutils-gdb.git;branch=gdb-7.11-branch \ - file://0001-include-sys-types.h-for-mode_t.patch \ - file://0002-make-man-install-relative-to-DESTDIR.patch \ - file://0003-mips-linux-nat-Define-_ABIO32-if-not-defined.patch \ - file://0004-ppc-ptrace-Define-pt_regs-uapi_pt_regs-on-GLIBC-syst.patch \ - file://0005-Add-support-for-Renesas-SH-sh4-architecture.patch \ - file://0006-Dont-disable-libreadline.a-when-using-disable-static.patch \ - file://0007-use-asm-sgidefs.h.patch \ - file://0008-Use-exorted-definitions-of-SIGRTMIN.patch \ - file://0009-Change-order-of-CFLAGS.patch \ - file://0010-resolve-restrict-keyword-conflict.patch \ - file://0011-avx_mpx.patch \ -" - UPSTREAM_CHECK_GITTAGREGEX = "gdb\-(?P<pver>.+)\-release" -S = "${WORKDIR}/git" - B = "${WORKDIR}/build-${TARGET_SYS}" EXTRA_OEMAKE = "'SUBDIRS=intl mmalloc libiberty opcodes bfd sim gdb etc utils'" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian.inc b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian.inc index e53081d0c..3ff198953 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian.inc +++ b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian.inc @@ -14,7 +14,8 @@ GDBPROPREFIX = "--program-prefix='${TARGET_PREFIX}'" PACKAGECONFIG ??= "python readline" PACKAGECONFIG[python] = "--with-python=${WORKDIR}/python,--without-python,nativesdk-python3, \ nativesdk-python3-core nativesdk-python3-lang nativesdk-python3-re \ - nativesdk-python3-codecs nativesdk-python3-netclient" + nativesdk-python3-codecs nativesdk-python3-netclient \ + nativesdk-python3-importlib" PACKAGECONFIG[readline] = "--with-system-readline,--without-system-readline,nativesdk-readline" SSTATE_DUPWHITELIST += "${STAGING_DATADIR}/gdb" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian_7.11.bb b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian_7.11.1.bb index 301035940..301035940 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian_7.11.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross-canadian_7.11.1.bb diff --git a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross_7.11.bb b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross_7.11.1.bb index 50cf159fd..50cf159fd 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross_7.11.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb-cross_7.11.1.bb diff --git 
a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb_7.11.bb b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb_7.11.1.bb index 57cffc998..57cffc998 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb_7.11.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/gdb/gdb_7.11.1.bb diff --git a/import-layers/yocto-poky/meta/recipes-devtools/perl/perl/perl-test-customized.patch b/import-layers/yocto-poky/meta/recipes-devtools/perl/perl/perl-test-customized.patch index 84b0b88f2..477be29ef 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/perl/perl/perl-test-customized.patch +++ b/import-layers/yocto-poky/meta/recipes-devtools/perl/perl/perl-test-customized.patch @@ -17,7 +17,7 @@ Index: perl-5.22.1/t/porting/customized.dat ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/Liblist.pm bef099988b15fb0b2a1f5ac48c01af1f7f36d329 -ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/Liblist/Kid.pm 8168e18f0e3ce3ece4bb7e7c72d57ec07c67c402 -ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/MakeMaker.pm 7115e97a53559cb3ec061dd6f7f344e522724c4a -+ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/Liblist/Kid.pm a08ecf80c8f0a234243817713b2a5ab0dcae3c0a ++ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/Liblist/Kid.pm 8c22e119b96d674f1f268a9c495bb4aa04e1100b +ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/MakeMaker.pm 3d7abd674b15ed323f743594ef0bd09db76b1aee ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/MakeMaker/Config.pm f8db8d4245bf0684b8210c811f50d7cfb1a27d78 ExtUtils::MakeMaker cpan/ExtUtils-MakeMaker/lib/ExtUtils/MakeMaker/FAQ.pod 757bffb47857521311f8f3bde43ebe165f8d5191 diff --git a/import-layers/yocto-poky/meta/recipes-devtools/pseudo/files/More-correctly-fix-xattrs.patch b/import-layers/yocto-poky/meta/recipes-devtools/pseudo/files/More-correctly-fix-xattrs.patch new file mode 100644 index 000000000..3d178f9b4 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/pseudo/files/More-correctly-fix-xattrs.patch @@ -0,0 +1,37 @@ +From 45eca34c754d416a38bee90fb2d3c110a0b6cc5f Mon Sep 17 00:00:00 2001 +From: Seebs <seebs@seebs.net> +Date: Thu, 3 Nov 2016 11:36:12 -0500 +Subject: [PATCH] More-correctly fix xattrs + +Fix provided by Patrick Ohly <patrick.ohly@intel.com>. This resolves +the actual cause of the path length mismatches, and explains why +I couldn't quite explain why the previous one had only sometimes +worked, also why it showed up on directories but not plain files. + +Signed-off-by: Seebs <seebs@seebs.net> + +Fixes [YOCTO #10623] + +Upstream-Status: Backport [commit 45eca34c754d416a38bee90fb2d3c110a0b6cc5f] + +Signed-off-by: Patrick Ohly <patrick.ohly@intel.com> +--- + pseudo_client.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pseudo_client.c b/pseudo_client.c +index 6a08df3..b1a00fa 100644 +--- a/pseudo_client.c ++++ b/pseudo_client.c +@@ -1676,7 +1676,7 @@ pseudo_client_op(pseudo_op_t op, int access, int fd, int dirfd, const char *path + * empty path for that. 
+ */ + if (path_extra_1) { +- size_t full_len = path_extra_1len + 1 + pathlen; ++ size_t full_len = path_extra_1len + 1 + pathlen - strip_slash; + size_t partial_len = pathlen - 1 - strip_slash; + if (path_extra_2) { + full_len += path_extra_2len + 1; +-- +2.1.4 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.8.1.bb b/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.8.1.bb index fb70034b4..90b53c0c1 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.8.1.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_1.8.1.bb @@ -10,6 +10,7 @@ SRC_URI = "http://downloads.yoctoproject.org/releases/pseudo/${BPN}-${PV}.tar.bz file://0001-Quiet-diagnostics-during-startup-for-pseudo-d.patch \ file://0002-Use-correct-file-descriptor.patch \ file://0003-Fix-renameat-parallel-to-previous-fix-to-rename.patch \ + file://More-correctly-fix-xattrs.patch \ " SRC_URI[md5sum] = "ee38e4fb62ff88ad067b1a5a3825bac7" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb b/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb index 8110b1a19..ac923bbb7 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/pseudo/pseudo_git.bb @@ -1,6 +1,6 @@ require pseudo.inc -SRCREV = "befc6dbd6469d428c9e0830dbe51bdf7ac39d9ae" +SRCREV = "45eca34c754d416a38bee90fb2d3c110a0b6cc5f" PV = "1.8.1+git${SRCPV}" DEFAULT_PREFERENCE = "-1" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/files/d70d37b7c4aa2af3fe879a0d858c54f2aa32a725.patch b/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/files/d70d37b7c4aa2af3fe879a0d858c54f2aa32a725.patch new file mode 100644 index 000000000..08cb078fa --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/files/d70d37b7c4aa2af3fe879a0d858c54f2aa32a725.patch @@ -0,0 +1,47 @@ +From 154b2c19f392817a936aea0190e276f8228cb489 Mon Sep 17 00:00:00 2001 +From: "Erik M. Bray" <erik.bray@lri.fr> +Date: Mon, 12 Dec 2016 13:07:16 +0100 +Subject: [PATCH] BUG: xlocale.h is not available in newlib--all the defines + used here from xlocale.h are instead found in locale.h + +Added a feature check for xlocale.h, with fallback to locale.h if it is +missing. 
+--- + numpy/core/setup_common.py | 1 + + numpy/core/src/multiarray/numpyos.c | 8 +++++++- + 2 files changed, 8 insertions(+), 1 deletion(-) + +Upstream-Status: Backport +RP 2017/9/6 + +diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py +index ba7521e3043..a1729e65656 100644 +--- a/numpy/core/setup_common.py ++++ b/numpy/core/setup_common.py +@@ -113,6 +113,7 @@ def check_api_version(apiversion, codegen_dir): + "xmmintrin.h", # SSE + "emmintrin.h", # SSE2 + "features.h", # for glibc version linux ++ "xlocale.h" # see GH#8367 + ] + + # optional gcc compiler builtins and their call arguments and optional a +diff --git a/numpy/core/src/multiarray/numpyos.c b/numpy/core/src/multiarray/numpyos.c +index 450ec40b6e0..84617ea78c3 100644 +--- a/numpy/core/src/multiarray/numpyos.c ++++ b/numpy/core/src/multiarray/numpyos.c +@@ -15,7 +15,13 @@ + + #ifdef HAVE_STRTOLD_L + #include <stdlib.h> +-#include <xlocale.h> ++#ifdef HAVE_XLOCALE_H ++ /* ++ * the defines from xlocale.h are included in locale.h on some sytems; ++ * see gh-8367 ++ */ ++ #include <xlocale.h> ++#endif + #endif + + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python-numpy_1.11.1.bb b/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python-numpy_1.11.1.bb index c5af720a9..c94f5c3d1 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python-numpy_1.11.1.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python-numpy_1.11.1.bb @@ -9,6 +9,7 @@ SRC_URI = "https://files.pythonhosted.org/packages/source/n/${SRCNAME}/${SRCNAME file://0001-Don-t-search-usr-and-so-on-for-libraries-by-default-.patch \ file://remove-build-path-in-comments.patch \ file://fix_shebang_f2py.patch \ + file://d70d37b7c4aa2af3fe879a0d858c54f2aa32a725.patch \ ${CONFIGFILESURI} " UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/numpy/files/" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python3-numpy_1.11.0.bb b/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python3-numpy_1.11.0.bb index 3cca2239d..8b502febc 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python3-numpy_1.11.0.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/python-numpy/python3-numpy_1.11.0.bb @@ -9,6 +9,7 @@ SRC_URI = "https://files.pythonhosted.org/packages/source/n/${SRCNAME}/${SRCNAME file://0001-Don-t-search-usr-and-so-on-for-libraries-by-default-.patch \ file://remove-build-path-in-comments.patch \ file://fix_shebang_f2py.patch \ + file://d70d37b7c4aa2af3fe879a0d858c54f2aa32a725.patch \ ${CONFIGFILESURI} " UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/numpy/files/" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/python/python-2.7-manifest.inc b/import-layers/yocto-poky/meta/recipes-devtools/python/python-2.7-manifest.inc index 189689d4c..621024f6a 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/python/python-2.7-manifest.inc +++ b/import-layers/yocto-poky/meta/recipes-devtools/python/python-2.7-manifest.inc @@ -246,7 +246,7 @@ RDEPENDS_${PN}-terminal="${PN}-core ${PN}-io" FILES_${PN}-terminal="${libdir}/python2.7/pty.* ${libdir}/python2.7/tty.* " SUMMARY_${PN}-tests="Python tests" -RDEPENDS_${PN}-tests="${PN}-core" +RDEPENDS_${PN}-tests="${PN}-core ${PN}-modules" FILES_${PN}-tests="${libdir}/python2.7/test " SUMMARY_${PN}-textutils="Python option parsing, text wrapping and CSV support" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/python/python-3.5-manifest.inc 
b/import-layers/yocto-poky/meta/recipes-devtools/python/python-3.5-manifest.inc index 304611432..6c690db80 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/python/python-3.5-manifest.inc +++ b/import-layers/yocto-poky/meta/recipes-devtools/python/python-3.5-manifest.inc @@ -115,7 +115,7 @@ FILES_${PN}-image="${libdir}/python3.5/colorsys.* ${libdir}/python3.5/imghdr.* $ SUMMARY_${PN}-importlib="Python import implementation library" RDEPENDS_${PN}-importlib="${PN}-core ${PN}-lang" -FILES_${PN}-importlib="${libdir}/python3.5/importlib " +FILES_${PN}-importlib="${libdir}/python3.5/importlib ${libdir}/python3.5/imp.* " SUMMARY_${PN}-io="Python low-level I/O" RDEPENDS_${PN}-io="${PN}-core ${PN}-math" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/python/python3-native_3.5.2.bb b/import-layers/yocto-poky/meta/recipes-devtools/python/python3-native_3.5.2.bb index 594f15c7c..f32f05cca 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/python/python3-native_3.5.2.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/python/python3-native_3.5.2.bb @@ -65,7 +65,8 @@ RPROVIDES += " \ python3-unittest-native \ " -EXTRA_OECONF_append = " --bindir=${bindir}/${PN} --without-ensurepip" +# uninative may be used on pre glibc 2.25 systems which don't have getentropy +EXTRA_OECONF_append = " --bindir=${bindir}/${PN} --without-ensurepip ac_cv_func_getentropy=no" EXTRA_OEMAKE = '\ LIBC="" \ @@ -80,6 +81,7 @@ PYTHONLSBOPTS = "" do_configure_append() { autoreconf --verbose --install --force --exclude=autopoint ../Python-${PV}/Modules/_ctypes/libffi + sed -i -e 's,#define HAVE_GETRANDOM 1,/\* #undef HAVE_GETRANDOM \*/,' ${B}/pyconfig.h } do_install() { diff --git a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0001-virtio-zero-vq-inuse-in-virtio_reset.patch b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0001-virtio-zero-vq-inuse-in-virtio_reset.patch new file mode 100644 index 000000000..86955d043 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0001-virtio-zero-vq-inuse-in-virtio_reset.patch @@ -0,0 +1,57 @@ +Upstream-Status: Backport + +Supplementary fix of CVE-2016-5403 which is backported from: + +http://git.qemu.org/?p=qemu.git;a=commit;h=4b7f91e + +Signed-off-by: Kai Kang <kai.kang@windriver.com> +--- +From 4b7f91ed0270a371e1933efa21ba600b6da23ab9 Mon Sep 17 00:00:00 2001 +From: Stefan Hajnoczi <stefanha@redhat.com> +Date: Wed, 7 Sep 2016 11:51:25 -0400 +Subject: [PATCH] virtio: zero vq->inuse in virtio_reset() + +vq->inuse must be zeroed upon device reset like most other virtqueue +fields. + +In theory, virtio_reset() just needs assert(vq->inuse == 0) since +devices must clean up in-flight requests during reset (requests cannot +not be leaked!). + +In practice, it is difficult to achieve vq->inuse == 0 across reset +because balloon, blk, 9p, etc implement various different strategies for +cleaning up requests. Most devices call g_free(elem) directly without +telling virtio.c that the VirtQueueElement is cleaned up. Therefore +vq->inuse is not decremented during reset. + +This patch zeroes vq->inuse and trusts that devices are not leaking +VirtQueueElements across reset. + +I will send a follow-up series that refactors request life-cycle across +all devices and converts vq->inuse = 0 into assert(vq->inuse == 0) but +this more invasive approach is not appropriate for stable trees. + +Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> +Cc: qemu-stable <qemu-stable@nongnu.org> +Reviewed-by: Michael S. 
Tsirkin <mst@redhat.com> +Signed-off-by: Michael S. Tsirkin <mst@redhat.com> +Reviewed-by: Ladi Prosek <lprosek@redhat.com> +--- + hw/virtio/virtio.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c +index 74c085c..e8a13a5 100644 +--- a/hw/virtio/virtio.c ++++ b/hw/virtio/virtio.c +@@ -822,6 +822,7 @@ void virtio_reset(void *opaque) + vdev->vq[i].signalled_used_valid = false; + vdev->vq[i].notification = true; + vdev->vq[i].vring.num = vdev->vq[i].vring.num_default; ++ vdev->vq[i].inuse = 0; + } + } + +-- +2.9.3 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0002-fix-CVE-2016-7423.patch b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0002-fix-CVE-2016-7423.patch new file mode 100644 index 000000000..fdf58a3d6 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0002-fix-CVE-2016-7423.patch @@ -0,0 +1,45 @@ +Upstream-Status: Backport + +Backport patch to fix CVE-2016-7423 from: + +http://git.qemu.org/?p=qemu.git;a=commit;h=670e56d3ed + +CVE: CVE-2016-7423 + +Signed-off-by: Kai Kang <kai.kang@windriver.com> +--- +From 670e56d3ed2918b3861d9216f2c0540d9e9ae0d5 Mon Sep 17 00:00:00 2001 +From: Li Qiang <liqiang6-s@360.cn> +Date: Mon, 12 Sep 2016 18:14:11 +0530 +Subject: [PATCH] scsi: mptsas: use g_new0 to allocate MPTSASRequest object + +When processing IO request in mptsas, it uses g_new to allocate +a 'req' object. If an error occurs before 'req->sreq' is +allocated, It could lead to an OOB write in mptsas_free_request +function. Use g_new0 to avoid it. + +Reported-by: Li Qiang <liqiang6-s@360.cn> +Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> +Message-Id: <1473684251-17476-1-git-send-email-ppandit@redhat.com> +Cc: qemu-stable@nongnu.org +Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> +--- + hw/scsi/mptsas.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c +index 0e0a22f..eaae1bb 100644 +--- a/hw/scsi/mptsas.c ++++ b/hw/scsi/mptsas.c +@@ -304,7 +304,7 @@ static int mptsas_process_scsi_io_request(MPTSASState *s, + goto bad; + } + +- req = g_new(MPTSASRequest, 1); ++ req = g_new0(MPTSASRequest, 1); + QTAILQ_INSERT_TAIL(&s->pending, req, next); + req->scsi_io = *scsi_io; + req->dev = s; +-- +2.9.3 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0003-fix-CVE-2016-7908.patch b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0003-fix-CVE-2016-7908.patch new file mode 100644 index 000000000..05cc3d9d1 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0003-fix-CVE-2016-7908.patch @@ -0,0 +1,62 @@ +Upstream-Status: Backport + +Backport patch to fix CVE-2016-7908 from: + +http://git.qemu.org/?p=qemu.git;a=commit;h=070c4b92b8c + +CVE: CVE-2016-7908 + +Signed-off-by: Kai Kang <kai.kang@windriver.com> +--- +From 070c4b92b8cd5390889716677a0b92444d6e087a Mon Sep 17 00:00:00 2001 +From: Prasad J Pandit <pjp@fedoraproject.org> +Date: Thu, 22 Sep 2016 16:02:37 +0530 +Subject: [PATCH] net: mcf: limit buffer descriptor count + +ColdFire Fast Ethernet Controller uses buffer descriptors to manage +data flow to/fro receive & transmit queues. While transmitting +packets, it could continue to read buffer descriptors if a buffer +descriptor has length of zero and has crafted values in bd.flags. +Set upper limit to number of buffer descriptors. 
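The hunk below caps that walk at FEC_MAX_DESC. As a self-contained illustration of the same bounded-walk pattern (an editorial sketch with made-up types and a fake read_desc() helper, not QEMU code), the essential shape is:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_DESC 1024

    struct desc {
        uint32_t flags;
        uint32_t next;
    };

    /* Stand-in for reading a guest-supplied descriptor: this one fabricates
     * a chain that never terminates, i.e. exactly the malicious case. */
    static void read_desc(struct desc *d, uint32_t addr)
    {
        d->flags = 0;
        d->next = addr + 8;
    }

    static int walk_chain(uint32_t addr)
    {
        int count = 0;

        while (count++ < MAX_DESC) {      /* hard upper bound on the walk */
            struct desc d;

            read_desc(&d, addr);
            if (d.flags & 0x8000)         /* "last descriptor" bit in this sketch */
                return 0;
            addr = d.next;
        }
        return -1;                        /* chain too long: treat as malformed */
    }

    int main(void)
    {
        printf("walk result: %d\n", walk_chain(0));  /* prints -1: loop is bounded */
        return 0;
    }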
+ +Reported-by: Li Qiang <liqiang6-s@360.cn> +Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> +Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> +Signed-off-by: Jason Wang <jasowang@redhat.com> +--- + hw/net/mcf_fec.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c +index 0ee8ad9..d31fea1 100644 +--- a/hw/net/mcf_fec.c ++++ b/hw/net/mcf_fec.c +@@ -23,6 +23,7 @@ do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0) + #define DPRINTF(fmt, ...) do {} while(0) + #endif + ++#define FEC_MAX_DESC 1024 + #define FEC_MAX_FRAME_SIZE 2032 + + typedef struct { +@@ -149,7 +150,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s) + uint32_t addr; + mcf_fec_bd bd; + int frame_size; +- int len; ++ int len, descnt = 0; + uint8_t frame[FEC_MAX_FRAME_SIZE]; + uint8_t *ptr; + +@@ -157,7 +158,7 @@ static void mcf_fec_do_tx(mcf_fec_state *s) + ptr = frame; + frame_size = 0; + addr = s->tx_descriptor; +- while (1) { ++ while (descnt++ < FEC_MAX_DESC) { + mcf_fec_read_bd(&bd, addr); + DPRINTF("tx_bd %x flags %04x len %d data %08x\n", + addr, bd.flags, bd.length, bd.data); +-- +2.9.3 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0004-fix-CVE-2016-7909.patch b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0004-fix-CVE-2016-7909.patch new file mode 100644 index 000000000..e71bbf620 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/0004-fix-CVE-2016-7909.patch @@ -0,0 +1,42 @@ +Upstream-Status: Backport [http://git.qemu.org/?p=qemu.git;a=commit;h=34e29ce] +CVE: CVE-2016-7909 + +Signed-off-by: Kai Kang <kai.kang@windriver.com> +--- +From 34e29ce754c02bb6b3bdd244fbb85033460feaff Mon Sep 17 00:00:00 2001 +From: Prasad J Pandit <pjp@fedoraproject.org> +Date: Fri, 30 Sep 2016 00:27:33 +0530 +Subject: [PATCH] net: pcnet: check rx/tx descriptor ring length + +The AMD PC-Net II emulator has set of control and status(CSR) +registers. Of these, CSR76 and CSR78 hold receive and transmit +descriptor ring length respectively. This ring length could range +from 1 to 65535. Setting ring length to zero leads to an infinite +loop in pcnet_rdra_addr() or pcnet_transmit(). Add check to avoid it. + +Reported-by: Li Qiang <liqiang6-s@360.cn> +Signed-off-by: Prasad J Pandit <pjp@fedoraproject.org> +Signed-off-by: Jason Wang <jasowang@redhat.com> +--- + hw/net/pcnet.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c +index 198a01f..3078de8 100644 +--- a/hw/net/pcnet.c ++++ b/hw/net/pcnet.c +@@ -1429,8 +1429,11 @@ static void pcnet_csr_writew(PCNetState *s, uint32_t rap, uint32_t new_value) + case 47: /* POLLINT */ + case 72: + case 74: ++ break; + case 76: /* RCVRL */ + case 78: /* XMTRL */ ++ val = (val > 0) ? 
val : 512; ++ break; + case 112: + if (CSR_STOP(s) || CSR_SPND(s)) + break; +-- +2.10.1 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/04b33e21866412689f18b7ad6daf0a54d8f959a7.patch b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/04b33e21866412689f18b7ad6daf0a54d8f959a7.patch new file mode 100644 index 000000000..d947e8cba --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/04b33e21866412689f18b7ad6daf0a54d8f959a7.patch @@ -0,0 +1,282 @@ +From 04b33e21866412689f18b7ad6daf0a54d8f959a7 Mon Sep 17 00:00:00 2001 +From: Khem Raj <raj.khem@gmail.com> +Date: Wed, 28 Jun 2017 13:44:52 -0700 +Subject: [PATCH] Replace 'struct ucontext' with 'ucontext_t' type + +glibc used to have: + + typedef struct ucontext { ... } ucontext_t; + +glibc now has: + + typedef struct ucontext_t { ... } ucontext_t; + +(See https://sourceware.org/bugzilla/show_bug.cgi?id=21457 + for detail and rationale for the glibc change) + +However, QEMU used "struct ucontext" in declarations. This is a +private name and compatibility cannot be guaranteed. Switch to +only using the standardized type name. + +Signed-off-by: Khem Raj <raj.khem@gmail.com> +Message-id: 20170628204452.41230-1-raj.khem@gmail.com +Cc: Kamil Rytarowski <kamil@netbsd.org> +Cc: Riku Voipio <riku.voipio@iki.fi> +Cc: Laurent Vivier <laurent@vivier.eu> +Cc: Paolo Bonzini <pbonzini@redhat.com> +Reviewed-by: Eric Blake <eblake@redhat.com> +[PMM: Rewrote commit message, based mostly on the one from + Nathaniel McCallum] +Signed-off-by: Peter Maydell <peter.maydell@linaro.org> + +Upstream-Status: Backport +RP 2017/9/6 +--- + linux-user/host/aarch64/hostdep.h | 2 +- + linux-user/host/arm/hostdep.h | 2 +- + linux-user/host/i386/hostdep.h | 2 +- + linux-user/host/ppc64/hostdep.h | 2 +- + linux-user/host/s390x/hostdep.h | 2 +- + linux-user/host/x86_64/hostdep.h | 2 +- + linux-user/signal.c | 10 +++++----- + tests/tcg/test-i386.c | 4 ++-- + user-exec.c | 18 +++++++++--------- + 9 files changed, 22 insertions(+), 22 deletions(-) + +diff --git a/linux-user/host/aarch64/hostdep.h b/linux-user/host/aarch64/hostdep.h +index 64f75ce..a8d41a2 100644 +--- a/linux-user/host/aarch64/hostdep.h ++++ b/linux-user/host/aarch64/hostdep.h +@@ -24,7 +24,7 @@ extern char safe_syscall_end[]; + /* Adjust the signal context to rewind out of safe-syscall if we're in it */ + static inline void rewind_if_in_safe_syscall(void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + __u64 *pcreg = &uc->uc_mcontext.pc; + + if (*pcreg > (uintptr_t)safe_syscall_start +diff --git a/linux-user/host/arm/hostdep.h b/linux-user/host/arm/hostdep.h +index 5c1ae60..9276fe6 100644 +--- a/linux-user/host/arm/hostdep.h ++++ b/linux-user/host/arm/hostdep.h +@@ -24,7 +24,7 @@ extern char safe_syscall_end[]; + /* Adjust the signal context to rewind out of safe-syscall if we're in it */ + static inline void rewind_if_in_safe_syscall(void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + unsigned long *pcreg = &uc->uc_mcontext.arm_pc; + + if (*pcreg > (uintptr_t)safe_syscall_start +diff --git a/linux-user/host/i386/hostdep.h b/linux-user/host/i386/hostdep.h +index d834bd8..073be74 100644 +--- a/linux-user/host/i386/hostdep.h ++++ b/linux-user/host/i386/hostdep.h +@@ -24,7 +24,7 @@ extern char safe_syscall_end[]; + /* Adjust the signal context to rewind out of safe-syscall if we're in it */ + static inline void rewind_if_in_safe_syscall(void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + greg_t *pcreg = 
&uc->uc_mcontext.gregs[REG_EIP]; + + if (*pcreg > (uintptr_t)safe_syscall_start +diff --git a/linux-user/host/ppc64/hostdep.h b/linux-user/host/ppc64/hostdep.h +index 0b0f5f7..98979ad 100644 +--- a/linux-user/host/ppc64/hostdep.h ++++ b/linux-user/host/ppc64/hostdep.h +@@ -24,7 +24,7 @@ extern char safe_syscall_end[]; + /* Adjust the signal context to rewind out of safe-syscall if we're in it */ + static inline void rewind_if_in_safe_syscall(void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + unsigned long *pcreg = &uc->uc_mcontext.gp_regs[PT_NIP]; + + if (*pcreg > (uintptr_t)safe_syscall_start +diff --git a/linux-user/host/s390x/hostdep.h b/linux-user/host/s390x/hostdep.h +index 6f9da9c..4f0171f 100644 +--- a/linux-user/host/s390x/hostdep.h ++++ b/linux-user/host/s390x/hostdep.h +@@ -24,7 +24,7 @@ extern char safe_syscall_end[]; + /* Adjust the signal context to rewind out of safe-syscall if we're in it */ + static inline void rewind_if_in_safe_syscall(void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + unsigned long *pcreg = &uc->uc_mcontext.psw.addr; + + if (*pcreg > (uintptr_t)safe_syscall_start +diff --git a/linux-user/host/x86_64/hostdep.h b/linux-user/host/x86_64/hostdep.h +index 3b42596..a4fefb5 100644 +--- a/linux-user/host/x86_64/hostdep.h ++++ b/linux-user/host/x86_64/hostdep.h +@@ -24,7 +24,7 @@ extern char safe_syscall_end[]; + /* Adjust the signal context to rewind out of safe-syscall if we're in it */ + static inline void rewind_if_in_safe_syscall(void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + greg_t *pcreg = &uc->uc_mcontext.gregs[REG_RIP]; + + if (*pcreg > (uintptr_t)safe_syscall_start +diff --git a/linux-user/signal.c b/linux-user/signal.c +index d68bd26..cc0c3fc 100644 +--- a/linux-user/signal.c ++++ b/linux-user/signal.c +@@ -3346,7 +3346,7 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, + * + * a0 = signal number + * a1 = pointer to siginfo_t +- * a2 = pointer to struct ucontext ++ * a2 = pointer to ucontext_t + * + * $25 and PC point to the signal handler, $29 points to the + * struct sigframe. +@@ -3764,7 +3764,7 @@ struct target_signal_frame { + + struct rt_signal_frame { + siginfo_t info; +- struct ucontext uc; ++ ucontext_t uc; + uint32_t tramp[2]; + }; + +@@ -3980,7 +3980,7 @@ struct rt_signal_frame { + siginfo_t *pinfo; + void *puc; + siginfo_t info; +- struct ucontext uc; ++ ucontext_t uc; + uint16_t retcode[4]; /* Trampoline code. 
*/ + }; + +@@ -4515,7 +4515,7 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka, + tswap_siginfo(&frame->info, info); + } + +- /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/ ++ /*err |= __clear_user(&frame->uc, offsetof(ucontext_t, uc_mcontext));*/ + __put_user(0, &frame->uc.tuc_flags); + __put_user(0, &frame->uc.tuc_link); + __put_user(target_sigaltstack_used.ss_sp, +@@ -5007,7 +5007,7 @@ enum { + + struct target_ucontext { + target_ulong tuc_flags; +- target_ulong tuc_link; /* struct ucontext __user * */ ++ target_ulong tuc_link; /* ucontext_t __user * */ + struct target_sigaltstack tuc_stack; + #if !defined(TARGET_PPC64) + int32_t tuc_pad[7]; +diff --git a/tests/tcg/test-i386.c b/tests/tcg/test-i386.c +index 0f7b943..9599204 100644 +--- a/tests/tcg/test-i386.c ++++ b/tests/tcg/test-i386.c +@@ -1720,7 +1720,7 @@ int tab[2]; + + void sig_handler(int sig, siginfo_t *info, void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + + printf("si_signo=%d si_errno=%d si_code=%d", + info->si_signo, info->si_errno, info->si_code); +@@ -1912,7 +1912,7 @@ void test_exceptions(void) + /* specific precise single step test */ + void sig_trap_handler(int sig, siginfo_t *info, void *puc) + { +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + printf("EIP=" FMTLX "\n", (long)uc->uc_mcontext.gregs[REG_EIP]); + } + +diff --git a/user-exec.c b/user-exec.c +index a8f95fa..2a975ea 100644 +--- a/user-exec.c ++++ b/user-exec.c +@@ -167,7 +167,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + #elif defined(__OpenBSD__) + struct sigcontext *uc = puc; + #else +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + #endif + unsigned long pc; + int trapno; +@@ -222,7 +222,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + #elif defined(__OpenBSD__) + struct sigcontext *uc = puc; + #else +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + #endif + + pc = PC_sig(uc); +@@ -289,7 +289,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + ucontext_t *uc = puc; + #else +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + #endif + unsigned long pc; + int is_write; +@@ -316,7 +316,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) + { + siginfo_t *info = pinfo; +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + uint32_t *pc = uc->uc_mcontext.sc_pc; + uint32_t insn = *pc; + int is_write = 0; +@@ -414,7 +414,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + #if defined(__NetBSD__) + ucontext_t *uc = puc; + #else +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + #endif + unsigned long pc; + int is_write; +@@ -441,7 +441,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + int cpu_signal_handler(int host_signum, void *pinfo, void *puc) + { + siginfo_t *info = pinfo; +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + uintptr_t pc = uc->uc_mcontext.pc; + uint32_t insn = *(uint32_t *)pc; + bool is_write; +@@ -474,7 +474,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, void *puc) + int cpu_signal_handler(int host_signum, void *pinfo, void *puc) + { + siginfo_t *info = pinfo; +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + unsigned long ip; + int is_write = 0; + +@@ -505,7 +505,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) + { + siginfo_t *info = pinfo; +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + unsigned long pc; + uint16_t *pinsn; + int is_write = 0; +@@ -558,7 
+558,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, + void *puc) + { + siginfo_t *info = pinfo; +- struct ucontext *uc = puc; ++ ucontext_t *uc = puc; + greg_t pc = uc->uc_mcontext.pc; + int is_write; + +-- +1.8.3.1 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/run-ptest b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/run-ptest index f4b8e97e1..2206b3192 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/run-ptest +++ b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu/run-ptest @@ -1,8 +1,10 @@ #!/bin/sh # #This script is used to run qemu test suites -ptestdir=$(pwd) -cd tests +# +ptestdir=$(dirname "$(readlink -f "$0")") export SRC_PATH=$ptestdir -make -k runtest-TESTS | sed '/: OK/ s/^/PASS: /g' + +cd $ptestdir/tests +make -f Makefile.include -k runtest-TESTS | sed '/: OK/ s/^/PASS: /g' diff --git a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu_2.7.0.bb b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu_2.7.0.bb index 619b8ed44..85aadecf0 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu_2.7.0.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/qemu/qemu_2.7.0.bb @@ -9,6 +9,11 @@ SRC_URI += "file://configure-fix-Darwin-target-detection.patch \ file://no-valgrind.patch \ file://pathlimit.patch \ file://qemu-2.5.0-cflags.patch \ + file://0001-virtio-zero-vq-inuse-in-virtio_reset.patch \ + file://0002-fix-CVE-2016-7423.patch \ + file://0003-fix-CVE-2016-7908.patch \ + file://0004-fix-CVE-2016-7909.patch \ + file://04b33e21866412689f18b7ad6daf0a54d8f959a7.patch \ " SRC_URI_prepend = "http://wiki.qemu-project.org/download/${BP}.tar.bz2" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/rpm/rpm/0001-macros-add-_gpg_sign_cmd_extra_args.patch b/import-layers/yocto-poky/meta/recipes-devtools/rpm/rpm/0001-macros-add-_gpg_sign_cmd_extra_args.patch new file mode 100644 index 000000000..eb43a8734 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/rpm/rpm/0001-macros-add-_gpg_sign_cmd_extra_args.patch @@ -0,0 +1,43 @@ +From fa9726ff69f86d6a87c4c4bd7e3d2881999a872a Mon Sep 17 00:00:00 2001 +From: Markus Lehtonen <markus.lehtonen@linux.intel.com> +Date: Thu, 23 Feb 2017 11:14:20 +0200 +Subject: [PATCH] macros: add %_gpg_sign_cmd_extra_args + +Similar to what rpm4 has. This macro can be used to customize the +gpg command line options when signing packages. This is needed for +gpg 2.1 which requires "--pinentry-mode loopback" to allow +non-interactive signing. 
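For context, the knob introduced by this patch would typically be consumed from rpm macro configuration; a plausible example for non-interactive gpg 2.1 signing (the option value comes from the commit message above, while the macros-file placement and the placeholder key name are assumptions, not something this patch configures) is:

    %_gpg_name                  Example Signing Key
    %_gpg_sign_cmd_extra_args   --pinentry-mode loopback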
+ +Upstream-Status: Pending + +Signed-off-by: Markus Lehtonen <markus.lehtonen@linux.intel.com> +--- + macros/macros.in | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/macros/macros.in b/macros/macros.in +index 8bc5840..fda3c66 100644 +--- a/macros/macros.in ++++ b/macros/macros.in +@@ -524,7 +524,9 @@ $_arbitrary_tags_tests Foo:Bar + %_gpg_passphrase_way %{?_gpg_passphrase:--passphrase "%{_gpg_passphrase}"}%{!?_gpg_passphrase:--passphrase-fd 3} + + %__gpg_check_password_cmd %{__gpg} \ +- gpg --batch --no-verbose %{_gpg_passphrase_way} -u "%{_gpg_name}" -so - ++ gpg --batch --no-verbose %{_gpg_passphrase_way} \ ++ %{?_gpg_sign_cmd_extra_args:%{_gpg_sign_cmd_extra_args}} \ ++ -u "%{_gpg_name}" -so - + #%__pgp_check_password_cmd %{__pgp} \ + # pgp +batchmode=on +verbose=0 "%{_pgp_name}" -sf + #%__pgp5_check_password_cmd %{__pgp} \ +@@ -532,6 +534,7 @@ $_arbitrary_tags_tests Foo:Bar + + %__gpg_sign_cmd %{__gpg} \ + gpg --batch --no-verbose --no-armor %{_gpg_passphrase_way} --no-secmem-warning \ ++ %{?_gpg_sign_cmd_extra_args:%{_gpg_sign_cmd_extra_args}} \ + -u "%{_gpg_name}" -sbo %{__signature_filename} %{__plaintext_filename} + #%__pgp_sign_cmd %{__pgp} \ + # pgp +batchmode=on +verbose=0 +armor=off \ +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.16.bb b/import-layers/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.16.bb index 133239712..497af8e05 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.16.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/rpm/rpm_5.4.16.bb @@ -119,6 +119,7 @@ SRC_URI += " \ file://gcc6-stdlib.patch \ file://0001-system.h-query.c-support-nosignature.patch \ file://rpm-ensure-rpm2cpio-call-rpm-relocation-code.patch \ + file://0001-macros-add-_gpg_sign_cmd_extra_args.patch \ " # OE specific changes @@ -384,10 +385,10 @@ FILES_${PN} = "${bindir}/rpm \ ${localstatedir}/lib/wdj \ ${bindir}/rpm.real \ ${bindir}/rpmconstant.real \ - ${bindir}/rpm2cpio.real \ " FILES_${PN}-common = "${bindir}/rpm2cpio \ + ${bindir}/rpm2cpio.real \ ${bindir}/gendiff \ ${sysconfdir}/rpm \ ${localstatedir}/spool/repackage \ diff --git a/import-layers/yocto-poky/meta/recipes-devtools/sgml-common/sgml-common_0.6.3.bb b/import-layers/yocto-poky/meta/recipes-devtools/sgml-common/sgml-common_0.6.3.bb index 6098673a3..64a6b92c6 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/sgml-common/sgml-common_0.6.3.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/sgml-common/sgml-common_0.6.3.bb @@ -15,7 +15,7 @@ SECTION = "base" PR = "r1" -SRC_URI = "ftp://sources.redhat.com/pub/docbook-tools/new-trials/SOURCES/sgml-common-${PV}.tgz \ +SRC_URI = "https://ftp.osuosl.org/pub/blfs/conglomeration/sgml-common/sgml-common-${PV}.tgz \ file://autohell.patch \ file://license.patch" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion/0001-fix-svnadmin-create-fail-on-x86.patch b/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion/0001-fix-svnadmin-create-fail-on-x86.patch new file mode 100644 index 000000000..d4405287b --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion/0001-fix-svnadmin-create-fail-on-x86.patch @@ -0,0 +1,56 @@ +From 09475e0befca8d120c957177ce8568fa2209a1a9 Mon Sep 17 00:00:00 2001 +From: Dengke Du <dengke.du@windriver.com> +Date: Wed, 2 Nov 2016 11:09:44 +0800 +Subject: [PATCH] fix "svnadmin create" fail on x86 + +When run the following command on x86: + + svnadmin create /var/test_repo + +It cause segmentation 
fault error like the following: + + [16499.751837] svnadmin[21117]: segfault at 83 ip 00000000f74bf7f6 sp 00000000ffdd9b34 error 4 in libc-2.24.so[f7441000+1af000] + Segmentation fault (core dumped) + +This is because in source code ./subversion/libsvn_fs_fs/low_level.c, +function svn_fs_fs__unparse_footer, when: + + target arch: x86 + apr_off_t: 4 bytes + +if the "APR_OFF_T_FMT" is "lld", it still use type "apr_off_t" to pass +data to apr, but in apr source code file apr_snprintf.c the function +apr_vformatter meet "lld", it would use the: + + i_quad = va_arg(ap, apr_int64_t); + +It uses the apr_int64_t to deal data, it read 8 bytes, so the follow-up +data may be error. + +Upstream-Status: Pending + +Signed-off-by: Dengke Du <dengke.du@windriver.com> +--- + subversion/libsvn_fs_fs/low_level.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/subversion/libsvn_fs_fs/low_level.c b/subversion/libsvn_fs_fs/low_level.c +index a27bbcc..6ddbe28 100644 +--- a/subversion/libsvn_fs_fs/low_level.c ++++ b/subversion/libsvn_fs_fs/low_level.c +@@ -250,10 +250,10 @@ svn_fs_fs__unparse_footer(apr_off_t l2p_offset, + { + return svn_stringbuf_createf(result_pool, + "%" APR_OFF_T_FMT " %s %" APR_OFF_T_FMT " %s", +- l2p_offset, ++ (APR_OFF_T_FMT=="lld") ? (apr_int64_t)l2p_offset : l2p_offset, + svn_checksum_to_cstring(l2p_checksum, + scratch_pool), +- p2l_offset, ++ (APR_OFF_T_FMT=="lld") ? (apr_int64_t)p2l_offset : p2l_offset, + svn_checksum_to_cstring(p2l_checksum, + scratch_pool)); + } +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion/serfmacro.patch b/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion/serfmacro.patch new file mode 100644 index 000000000..9a45cb9a0 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion/serfmacro.patch @@ -0,0 +1,22 @@ +The existing sed expression can match expressions like +--sysroot=/some/path/xxx-linux/ which clearly isn't intended and +injects incorrect paths into LDFLAGS. + +Fix this in the same way we address the problem in CFLAGS. 
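Returning briefly to the svnadmin fix above: the underlying problem is a varargs width mismatch, where a 4-byte apr_off_t is pushed but a "%lld" conversion reads 8 bytes. A minimal sketch of the portable pattern (editorial example using plain long and inttypes.h, not Subversion code):

    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        /* Stand-in for an offset type whose width differs between targets
         * (plain 'long' here purely for illustration). */
        long l2p_offset = 123456789L;

        /* Safe pattern: cast to a fixed-width type and use its matching
         * conversion, so the argument pushed always agrees with what the
         * format string tells the consumer to read. */
        printf("l2p offset: %" PRId64 "\n", (int64_t)l2p_offset);
        return 0;
    }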
+ +RP 2016/12/7 +Upstream-Status: Pending + +Index: subversion-1.9.4/build/ac-macros/serf.m4 +=================================================================== +--- subversion-1.9.4.orig/build/ac-macros/serf.m4 ++++ subversion-1.9.4/build/ac-macros/serf.m4 +@@ -171,7 +171,7 @@ AC_DEFUN(SVN_SERF_PKG_CONFIG, + SVN_SERF_INCLUDES=[`$PKG_CONFIG $serf_pc_arg --cflags | $SED -e 's/ -D[^ ]*//g' -e 's/^-D[^ ]*//g'`] + SVN_SERF_LIBS=`$PKG_CONFIG $serf_pc_arg --libs-only-l` + dnl don't use --libs-only-L because then we might miss some options +- LDFLAGS=["$LDFLAGS `$PKG_CONFIG $serf_pc_arg --libs | $SED -e 's/-l[^ ]*//g'`"] ++ LDFLAGS=["$LDFLAGS `$PKG_CONFIG $serf_pc_arg --libs | $SED -e 's/ -l[^ ]*//g' -e 's/^-l[^ ]*//g'`"] + break + else + AC_MSG_RESULT([no]) diff --git a/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion_1.9.4.bb b/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion_1.9.4.bb index 3ce83c1da..3e3594075 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion_1.9.4.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/subversion/subversion_1.9.4.bb @@ -14,6 +14,8 @@ SRC_URI = "${APACHE_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \ file://disable_macos.patch \ file://serf.m4-Regex-modified-to-allow-D-in-paths.patch \ file://0001-Fix-libtool-name-in-configure.ac.patch \ + file://0001-fix-svnadmin-create-fail-on-x86.patch \ + file://serfmacro.patch \ " SRC_URI[md5sum] = "29121a038f87641055a8183f49e9739f" diff --git a/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/11_mips-link-tool.patch b/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/11_mips-link-tool.patch deleted file mode 100644 index ecb33b8cb..000000000 --- a/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/11_mips-link-tool.patch +++ /dev/null @@ -1,37 +0,0 @@ -Debian fix for MIPS: mmap(0x400000, 32768) failed in UME with error 22 (Invalid argument) - - https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=777704 - -Upstream-Status: Pending - -Signed-off-by: Andre McCurdy <armccurdy@gmail.com> - - -Description: Disable the MIPS linker workarounds when using -Ttext-section -Origin: vendor -Bug-Debian: http://bugs.debian.org/777704 -Author: James Cowgill <james410@cowgill.org.uk> -Last-Update: 2015-04-25 - ---- a/coregrind/link_tool_exe_linux.in -+++ b/coregrind/link_tool_exe_linux.in -@@ -76,12 +76,13 @@ - my $arch = substr($x, 0, index($x, "'")); - - my $extra_args; --if (($arch eq 'mips') || ($arch eq 'mipsel') -- || ($arch eq 'mipsisa32r2el')) { -- $extra_args = "-static -Wl,--section-start=.reginfo=$ala"; --} elsif (($arch eq 'mips64') || ($arch eq 'mips64el') || -- ($arch eq 'mipsisa64el')) { -- $extra_args = "-static -Wl,--section-start=.MIPS.options=$ala"; -+if ($arch =~ /^mips/ && "@FLAG_T_TEXT@" eq '-Ttext') { -+ # We only need to use the special mips options when using -Ttext -+ if ($arch =~ /^mips(64|isa64)/) { -+ $extra_args = "-static -Wl,--section-start=.MIPS.options=$ala"; -+ } else { -+ $extra_args = "-static -Wl,--section-start=.reginfo=$ala"; -+ } - } else { - $extra_args = "-static -Wl,@FLAG_T_TEXT@=$ala"; - } diff --git a/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/gcc5-port.patch b/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/gcc5-port.patch deleted file mode 100644 index 76bc82187..000000000 --- a/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/gcc5-port.patch +++ /dev/null @@ -1,64 +0,0 @@ -backport fix from upstream to fix build errors with gcc 6 
- -http://valgrind.10908.n7.nabble.com/Valgrind-r15773-in-trunk-configure-ac-drd-tests-std-thread-cpp-drd-tests-std-thread2-cpp-td56109.html - -../../../valgrind-3.11.0/drd/tests/std_thread.cpp:30:3: error: '_Impl_base' is not a member of 'std::thread' - std::thread::_Impl_base* __t = static_cast<std::thread::_Impl_base*>(__p); - ^~~ -../../../valgrind-3.11.0/drd/tests/std_thread.cpp:30:28: error: '__t' was not declared in this scope - std::thread::_Impl_base* __t = static_cast<std::thread::_Impl_base*>(__p); - ^~~ -../../../valgrind-3.11.0/drd/tests/std_thread.cpp:30:59: error: '_Impl_base' in 'class std::thread' does not name a type - std::thread::_Impl_base* __t = static_cast<std::thread::_Impl_base*>(__p); - ^~~~~~~~~~ - -Signed-off-by: Khem Raj <raj.khem@gmail.com> -Upstream-Status: Backport - -Index: configure.ac -=================================================================== ---- a/configure.ac (revision 15772) -+++ b/configure.ac (revision 15773) -@@ -160,7 +160,7 @@ - icc-1[[3-9]].*) - AC_MSG_RESULT([ok (ICC version ${gcc_version})]) - ;; -- notclang-[[3-9]].*|notclang-[[1-9][0-9]]*) -+ notclang-[[3-9]]|notclang-[[3-9]].*|notclang-[[1-9][0-9]]*) - AC_MSG_RESULT([ok (${gcc_version})]) - ;; - clang-2.9|clang-[[3-9]].*|clang-[[1-9][0-9]]*) -Index: drd/tests/std_thread2.cpp -=================================================================== ---- a/drd/tests/std_thread2.cpp (revision 15772) -+++ b/drd/tests/std_thread2.cpp (revision 15773) -@@ -26,6 +26,7 @@ - return 0; - } - -+#if defined(__GNUC__) && __GNUC__ -0 < 6 - // - // From libstdc++-v3/src/c++11/thread.cc - // -@@ -70,3 +71,4 @@ - } - } - } -+#endif -Index: drd/tests/std_thread.cpp -=================================================================== ---- a/drd/tests/std_thread.cpp (revision 15772) -+++ b/drd/tests/std_thread.cpp (revision 15773) -@@ -21,6 +21,7 @@ - return 0; - } - -+#if defined(__GNUC__) && __GNUC__ -0 < 6 - // - // From libstdc++-v3/src/c++11/thread.cc - // -@@ -65,3 +66,4 @@ - } - } - } -+#endif diff --git a/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/valgrind-make-ld-XXX.so-strlen-intercept-optional.patch b/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/valgrind-make-ld-XXX.so-strlen-intercept-optional.patch new file mode 100644 index 000000000..d04297dca --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind/valgrind-make-ld-XXX.so-strlen-intercept-optional.patch @@ -0,0 +1,45 @@ +From 005bd11809a1ce65e9f2c28e884354a4741650b9 Mon Sep 17 00:00:00 2001 +From: Andre McCurdy <armccurdy@gmail.com> +Date: Tue, 13 Dec 2016 11:29:55 +0800 +Subject: [PATCH] make ld-XXX.so strlen intercept optional + +Hack: Depending on how glibc was compiled (e.g. optimised for size or +built with _FORTIFY_SOURCE enabled) the strlen symbol might not be +found in ld-XXX.so. Therefore although we should still try to +intercept it, don't make it mandatory to do so. 
+ +Upstream-Status: Inappropriate + +Signed-off-by: Andre McCurdy <armccurdy@gmail.com> +Signed-off-by: Jackie Huang <jackie.huang@windriver.com> +--- + coregrind/m_redir.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +diff --git a/coregrind/m_redir.c b/coregrind/m_redir.c +index ff35009..d7d6816 100644 +--- a/coregrind/m_redir.c ++++ b/coregrind/m_redir.c +@@ -1275,7 +1275,18 @@ static void add_hardwired_spec (const HChar* sopatt, const HChar* fnpatt, + spec->to_addr = to_addr; + spec->isWrap = False; + spec->isGlobal = False; +- spec->mandatory = mandatory; ++ ++ /* Hack: Depending on how glibc was compiled (e.g. optimised for size or ++ built with _FORTIFY_SOURCE enabled) the strlen symbol might not be found. ++ Therefore although we should still try to intercept it, don't make it ++ mandatory to do so. We over-ride "mandatory" here to avoid the need to ++ patch the many different architecture specific callers to ++ add_hardwired_spec(). */ ++ if (0==VG_(strcmp)("strlen", fnpatt)) ++ spec->mandatory = NULL; ++ else ++ spec->mandatory = mandatory; ++ + /* VARIABLE PARTS */ + spec->mark = False; /* not significant */ + spec->done = False; /* not significant */ +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind_3.11.0.bb b/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind_3.12.0.bb index 42fd27f53..d82541b84 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind_3.11.0.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/valgrind/valgrind_3.12.0.bb @@ -16,20 +16,19 @@ SRC_URI = "http://www.valgrind.org/downloads/valgrind-${PV}.tar.bz2 \ file://fixed-perl-path.patch \ file://Added-support-for-PPC-instructions-mfatbu-mfatbl.patch \ file://run-ptest \ - file://11_mips-link-tool.patch \ file://0002-remove-rpath.patch \ file://0004-Fix-out-of-tree-builds.patch \ file://0005-Modify-vg_test-wrapper-to-support-PTEST-formats.patch \ file://0001-Remove-tests-that-fail-to-build-on-some-PPC32-config.patch \ file://use-appropriate-march-mcpu-mfpu-for-ARM-test-apps.patch \ file://avoid-neon-for-targets-which-don-t-support-it.patch \ - file://gcc5-port.patch \ + file://valgrind-make-ld-XXX.so-strlen-intercept-optional.patch \ " SRC_URI_append_libc-musl = "\ file://0001-fix-build-for-musl-targets.patch \ " -SRC_URI[md5sum] = "4ea62074da73ae82e0162d6550d3f129" -SRC_URI[sha256sum] = "6c396271a8c1ddd5a6fb9abe714ea1e8a86fce85b30ab26b4266aeb4c2413b42" +SRC_URI[md5sum] = "6eb03c0c10ea917013a7622e483d61bb" +SRC_URI[sha256sum] = "67ca4395b2527247780f36148b084f5743a68ab0c850cb43e4a5b4b012cf76a1" COMPATIBLE_HOST = '(i.86|x86_64|arm|aarch64|mips|powerpc|powerpc64).*-linux' diff --git a/import-layers/yocto-poky/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb b/import-layers/yocto-poky/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb index aaaeb6eb2..6072a7b1b 100644 --- a/import-layers/yocto-poky/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb +++ b/import-layers/yocto-poky/meta/recipes-devtools/xmlto/xmlto_0.0.28.bb @@ -1,11 +1,11 @@ SUMMARY = "A shell-script tool for converting XML files to various formats" -HOMEPAGE = "https://fedorahosted.org/xmlto/" +HOMEPAGE = "https://releases.pagure.org/xmlto/" SECTION = "docs/xmlto" LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552" -SRC_URI = "https://fedorahosted.org/releases/x/m/xmlto/xmlto-${PV}.tar.gz \ +SRC_URI = "https://releases.pagure.org/xmlto/xmlto-${PV}.tar.gz \ file://configure.in-drop-the-test-of-xmllint-and-xsltproc.patch \ 
file://catalog.xml \ " diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/build-tests.patch b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/build-tests.patch index e63457cf2..e63457cf2 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/build-tests.patch +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/build-tests.patch diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/dont-include-target-CFLAGS-in-host-LDFLAGS.patch b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/dont-include-target-CFLAGS-in-host-LDFLAGS.patch index ee756dc9e..ee756dc9e 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/dont-include-target-CFLAGS-in-host-LDFLAGS.patch +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/dont-include-target-CFLAGS-in-host-LDFLAGS.patch diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/mkbuiltins_have_stringize.patch b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/mkbuiltins_have_stringize.patch index c4229a7ed..c4229a7ed 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/mkbuiltins_have_stringize.patch +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/mkbuiltins_have_stringize.patch diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/run-ptest b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/run-ptest index 8dd3b9981..8dd3b9981 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/run-ptest +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/run-ptest diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/string-format.patch b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/string-format.patch index eda39649d..eda39649d 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/string-format.patch +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/string-format.patch diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/test-output.patch b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/test-output.patch index 2b09b7d97..2b09b7d97 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.48/test-output.patch +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash-3.2.57/test-output.patch diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash/CVE-2016-9401.patch b/import-layers/yocto-poky/meta/recipes-extended/bash/bash/CVE-2016-9401.patch new file mode 100644 index 000000000..28c927743 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash/CVE-2016-9401.patch @@ -0,0 +1,50 @@ +From fa741771ed47b30547be63b5b5dbfb51977aca12 Mon Sep 17 00:00:00 2001 +From: Chet Ramey <chet.ramey@case.edu> +Date: Fri, 20 Jan 2017 11:47:31 -0500 +Subject: [PATCH] Bash-4.4 patch 6 + +Bug-Reference-URL: +https://lists.gnu.org/archive/html/bug-bash/2016-11/msg00116.html + +Reference to upstream patch: +https://ftp.gnu.org/pub/gnu/bash/bash-4.4-patches/bash44-006 + +Bug-Description: +Out-of-range negative offsets to popd can cause the shell to crash attempting +to free an invalid memory block. 
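The fix below rejects such offsets before they are used; the same both-ends range check is the generic defence for any signed, user-derived index (editorial sketch with a toy pop_entry() helper, not bash source):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Validate a signed, user-derived index against both ends of the valid
     * range before it is used to index into (and free from) the list. */
    static int pop_entry(char **list, int limit, int which)
    {
        if (which < 0 || which > limit) {
            fprintf(stderr, "offset %d out of range\n", which);
            return -1;
        }
        free(list[which]);
        list[which] = NULL;
        return 0;
    }

    int main(void)
    {
        char *dirs[3] = { strdup("/a"), strdup("/b"), strdup("/c") };

        pop_entry(dirs, 2, -7);   /* rejected instead of freeing a bogus pointer */
        pop_entry(dirs, 2, 1);    /* accepted */
        free(dirs[0]);
        free(dirs[2]);
        return 0;
    }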
+ +Upstream-Status: Backport +CVE: CVE-2016-9401 +Signed-off-by: Li Zhou <li.zhou@windriver.com> +--- + builtins/pushd.def | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/builtins/pushd.def b/builtins/pushd.def +index 9c6548f..8a13bae 100644 +--- a/builtins/pushd.def ++++ b/builtins/pushd.def +@@ -359,7 +359,7 @@ popd_builtin (list) + break; + } + +- if (which > directory_list_offset || (directory_list_offset == 0 && which == 0)) ++ if (which > directory_list_offset || (which < -directory_list_offset) || (directory_list_offset == 0 && which == 0)) + { + pushd_error (directory_list_offset, which_word ? which_word : ""); + return (EXECUTION_FAILURE); +@@ -381,6 +381,11 @@ popd_builtin (list) + remove that directory from the list and shift the remainder + of the list into place. */ + i = (direction == '+') ? directory_list_offset - which : which; ++ if (i < 0 || i > directory_list_offset) ++ { ++ pushd_error (directory_list_offset, which_word ? which_word : ""); ++ return (EXECUTION_FAILURE); ++ } + free (pushd_directory_list[i]); + directory_list_offset--; + +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash_3.2.48.bb b/import-layers/yocto-poky/meta/recipes-extended/bash/bash_3.2.48.bb deleted file mode 100644 index 6b4028df1..000000000 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash_3.2.48.bb +++ /dev/null @@ -1,47 +0,0 @@ -require bash.inc - -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=fd5d9bcabd8ed5a54a01ce8d183d592a" - -PR = "r11" - -SRC_URI = "${GNU_MIRROR}/bash/bash-${PV}.tar.gz;name=tarball \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-049;apply=yes;striplevel=0;name=patch049 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-050;apply=yes;striplevel=0;name=patch050 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-051;apply=yes;striplevel=0;name=patch051 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-052;apply=yes;striplevel=0;name=patch052 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-053;apply=yes;striplevel=0;name=patch053 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-054;apply=yes;striplevel=0;name=patch054 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-055;apply=yes;striplevel=0;name=patch055 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-056;apply=yes;striplevel=0;name=patch056 \ - ${GNU_MIRROR}/bash/bash-3.2-patches/bash32-057;apply=yes;striplevel=0;name=patch057 \ - file://mkbuiltins_have_stringize.patch \ - file://build-tests.patch \ - file://test-output.patch \ - file://run-ptest \ - file://dont-include-target-CFLAGS-in-host-LDFLAGS.patch \ - file://string-format.patch \ - " - -SRC_URI[tarball.md5sum] = "338dcf975a93640bb3eaa843ca42e3f8" -SRC_URI[tarball.sha256sum] = "128d281bd5682ba5f6953122915da71976357d7a76490d266c9173b1d0426348" -SRC_URI[patch049.md5sum] = "af571a2d164d5abdcae4499e94e8892c" -SRC_URI[patch049.sha256sum] = "b1217ed94bdb95dc878fa5cabbf8a164435eb0d9da23a392198f48566ee34a2f" -SRC_URI[patch050.md5sum] = "8443d4385d73ec835abe401d90591377" -SRC_URI[patch050.sha256sum] = "081bb03c580ecee63ba03b40beb3caf509eca29515b2e8dd3c078503609a1642" -SRC_URI[patch051.md5sum] = "15c6653042e9814aa87120098fc7a849" -SRC_URI[patch051.sha256sum] = "354886097cd95b4def77028f32ee01e2e088d58a98184fede9d3ce9320e218ef" -SRC_URI[patch052.md5sum] = "691023a944bbb9003cc92ad462d91fa1" -SRC_URI[patch052.sha256sum] = "a0eccf9ceda50871db10d21efdd74b99e35efbd55c970c400eeade012816bb61" -SRC_URI[patch053.md5sum] = "eb97d1c9230a55283d9dac69d3de2e46" -SRC_URI[patch053.sha256sum] = 
"fe6f0e96e0b966eaed9fb5e930ca12891f4380f30f9e0a773d200ff2063a864e" -SRC_URI[patch054.md5sum] = "1107744058c43b247f597584b88ba0a6" -SRC_URI[patch054.sha256sum] = "c6dab911e85688c542ce75afc175dbb4e5011de5102758e19a4a80dac1e79359" -SRC_URI[patch055.md5sum] = "05d201176d3499e2dfa4a73d09d42f05" -SRC_URI[patch055.sha256sum] = "c0e816700837942ed548da74e5917f74b70cbbbb10c9f2caf73e8e06a0713d0a" -SRC_URI[patch056.md5sum] = "222eaa3a2c26f54a15aa5e08817a534a" -SRC_URI[patch056.sha256sum] = "063a8d8d74e4407bf07a32b965b8ef6d213a66abdb6af26cc3584a437a56bbb4" -SRC_URI[patch057.md5sum] = "47d98e3e042892495c5efe54ec6e5913" -SRC_URI[patch057.sha256sum] = "5fc689394d515990f5ea74e2df765fc6e5e42ca44b4591b2c6f9be4b0cadf0f0" - -PARALLEL_MAKE = "" diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash_3.2.57.bb b/import-layers/yocto-poky/meta/recipes-extended/bash/bash_3.2.57.bb new file mode 100644 index 000000000..5c288b35a --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash_3.2.57.bb @@ -0,0 +1,18 @@ +require bash.inc + +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=fd5d9bcabd8ed5a54a01ce8d183d592a" + +SRC_URI = "${GNU_MIRROR}/${BPN}/${BP}.tar.gz \ + file://mkbuiltins_have_stringize.patch \ + file://build-tests.patch \ + file://test-output.patch \ + file://run-ptest \ + file://dont-include-target-CFLAGS-in-host-LDFLAGS.patch \ + file://string-format.patch \ + " + +SRC_URI[md5sum] = "237a8767c990b43ae2c89895c2dbc062" +SRC_URI[sha256sum] = "3fa9daf85ebf35068f090ce51283ddeeb3c75eb5bc70b1a4a7cb05868bfe06a4" + +PARALLEL_MAKE = "" diff --git a/import-layers/yocto-poky/meta/recipes-extended/bash/bash_4.3.30.bb b/import-layers/yocto-poky/meta/recipes-extended/bash/bash_4.3.30.bb index 765562fbd..b40059fa1 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/bash/bash_4.3.30.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/bash/bash_4.3.30.bb @@ -21,6 +21,7 @@ SRC_URI = "${GNU_MIRROR}/bash/${BP}.tar.gz;name=tarball \ ${GNU_MIRROR}/bash/bash-4.3-patches/bash43-044;apply=yes;striplevel=0;name=patch044 \ ${GNU_MIRROR}/bash/bash-4.3-patches/bash43-045;apply=yes;striplevel=0;name=patch045 \ ${GNU_MIRROR}/bash/bash-4.3-patches/bash43-046;apply=yes;striplevel=0;name=patch046 \ + ${GNU_MIRROR}/bash/bash-4.3-patches/bash43-047;apply=yes;striplevel=0;name=patch047 \ file://execute_cmd.patch;striplevel=0 \ file://mkbuiltins_have_stringize.patch \ file://build-tests.patch \ @@ -30,6 +31,7 @@ SRC_URI = "${GNU_MIRROR}/bash/${BP}.tar.gz;name=tarball \ file://fix-run-builtins.patch \ file://0001-help-fix-printf-format-security-warning.patch \ file://fix-run-intl.patch \ + file://CVE-2016-9401.patch \ " SRC_URI[tarball.md5sum] = "a27b3ee9be83bd3ba448c0ff52b28447" @@ -67,5 +69,7 @@ SRC_URI[patch045.md5sum] = "4473244ca5abfd4b018ea26dc73e7412" SRC_URI[patch045.sha256sum] = "ba6ec3978e9eaa1eb3fabdaf3cc6fdf8c4606ac1c599faaeb4e2d69864150023" SRC_URI[patch046.md5sum] = "7e5fb09991c077076b86e0e057798913" SRC_URI[patch046.sha256sum] = "b3b456a6b690cd293353f17e22d92a202b3c8bce587ae5f2667c20c9ab6f688f" +SRC_URI[patch047.md5sum] = "8483153bad1a6f52cadc3bd9a8df7835" +SRC_URI[patch047.sha256sum] = "c69248de7e78ba6b92f118fe1ef47bc86479d5040fe0b1f908ace1c9e3c67c4a" BBCLASSEXTEND = "nativesdk" diff --git a/import-layers/yocto-poky/meta/recipes-extended/chkconfig/chkconfig_1.3.58.bb b/import-layers/yocto-poky/meta/recipes-extended/chkconfig/chkconfig_1.3.58.bb index e8390264c..2f1f6c026 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/chkconfig/chkconfig_1.3.58.bb +++ 
b/import-layers/yocto-poky/meta/recipes-extended/chkconfig/chkconfig_1.3.58.bb @@ -6,7 +6,7 @@ of the drudgery of manually editing the symbolic links." RECIPE_NO_UPDATE_REASON = "Version 1.5 requires selinux" -HOMEPAGE = "http://fedorahosted.org/releases/c/h/chkconfig" +HOMEPAGE = "https://github.com/fedora-sysv" LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=5574c6965ae5f583e55880e397fbb018" @@ -16,12 +16,16 @@ PROVIDES += "virtual/update-alternatives" PR = "r7" -SRC_URI = "http://fedorahosted.org/releases/c/h/chkconfig/${BPN}-${PV}.tar.bz2 \ +S = "${WORKDIR}/${BPN}-${BPN}-${PV}" + +UPSTREAM_CHECK_URI = "https://github.com/fedora-sysv/${BPN}/releases" + +SRC_URI = "https://github.com/fedora-sysv/chkconfig/archive/chkconfig-${PV}.tar.gz \ file://replace_caddr_t.patch \ " -SRC_URI[md5sum] = "c2039ca67f2749fe0c06ef7c6f8ee246" -SRC_URI[sha256sum] = "18b497d25b2cada955c72810e45fcad8280d105f17cf45e2970f18271211de68" +SRC_URI[md5sum] = "3f51ac38a234be5278b3a2d9705eda5e" +SRC_URI[sha256sum] = "bf1e81f0d7cc999b536c9fe7877abf584a4082fd03c9d2597b6f090966579b40" inherit gettext diff --git a/import-layers/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.1.bb b/import-layers/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.1.bb index 99b2bb5c8..6d46629ee 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.1.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/cronie/cronie_1.5.1.bb @@ -3,7 +3,7 @@ DESCRIPTION = "Cronie contains the standard UNIX daemon crond that runs \ specified programs at scheduled times and related tools. It is based on the \ original cron and has security and configuration enhancements like the \ ability to use pam and SELinux." -HOMEPAGE = "https://fedorahosted.org/cronie/" +HOMEPAGE = "https://github.com/cronie-crond/cronie/" BUGTRACKER = "https://bugzilla.redhat.com" # Internet Systems Consortium License @@ -14,7 +14,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=dd2a592170760e1386c769e1043b3722 \ SECTION = "utils" -SRC_URI = "https://fedorahosted.org/releases/c/r/cronie/cronie-${PV}.tar.gz \ +UPSTREAM_CHECK_URI = "https://github.com/cronie-crond/${BPN}/releases/" + +SRC_URI = "https://github.com/cronie-crond/cronie/releases/download/cronie-${PV}/cronie-${PV}.tar.gz \ file://crond.init \ file://crontab \ file://crond.service \ diff --git a/import-layers/yocto-poky/meta/recipes-extended/diffutils/diffutils_3.4.bb b/import-layers/yocto-poky/meta/recipes-extended/diffutils/diffutils_3.4.bb index cb7092b51..be280ec0f 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/diffutils/diffutils_3.4.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/diffutils/diffutils_3.4.bb @@ -10,6 +10,9 @@ SRC_URI = "${GNU_MIRROR}/diffutils/diffutils-${PV}.tar.xz \ EXTRA_OECONF += "--without-libsigsegv-prefix" +# Fix "Argument list too long" error when len(TMPDIR) = 410 +acpaths = "-I ./m4" + do_configure_prepend () { # Need to remove gettext macros with weird mix of versions for i in codeset.m4 gettext_gl.m4 intlmacosx.m4 inttypes-pri.m4 lib-ld_gl.m4 lib-prefix_gl.m4 po_gl.m4 ssize_t.m4 wchar_t.m4 wint_t.m4; do diff --git a/import-layers/yocto-poky/meta/recipes-extended/ed/ed_1.9.bb b/import-layers/yocto-poky/meta/recipes-extended/ed/ed_1.9.bb index f2ec42ad1..d128de321 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/ed/ed_1.9.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/ed/ed_1.9.bb @@ -11,7 +11,7 @@ SECTION = "base" # LSB states that ed should be in /bin/ bindir = "${base_bindir}" -SRC_URI = 
"${GNU_MIRROR}/ed/ed-${PV}.tar.gz" +SRC_URI = "https://ftp.osuosl.org/pub/blfs/conglomeration/ed/ed-${PV}.tar.gz" SRC_URI[md5sum] = "565b6d1d5a9a8816b9b304fc4ed9405d" SRC_URI[sha256sum] = "d5b372cfadf073001823772272fceac2cfa87552c5cd5a8efc1c8aae61f45a88" diff --git a/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2016-10219.patch b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2016-10219.patch new file mode 100644 index 000000000..574abe0e4 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2016-10219.patch @@ -0,0 +1,49 @@ +From 4bef1a1d32e29b68855616020dbff574b9cda08f Mon Sep 17 00:00:00 2001 +From: Robin Watts <Robin.Watts@artifex.com> +Date: Thu, 29 Dec 2016 15:57:43 +0000 +Subject: [PATCH] Bug 697453: Avoid divide by 0 in scan conversion code. + +Arithmetic overflow due to extreme values in the scan conversion +code can cause a division by 0. + +Avoid this with a simple extra check. + + dx_old=cf814d81 + endp->x_next=b0e859b9 + alp->x_next=8069a73a + +leads to dx_den = 0 + +Upstream-Status: Backport +CVE: CVE-2016-10219 + +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + base/gxfill.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/base/gxfill.c b/base/gxfill.c +index 99196c0..2f81bb0 100644 +--- a/base/gxfill.c ++++ b/base/gxfill.c +@@ -1741,7 +1741,7 @@ intersect(active_line *endp, active_line *alp, fixed y, fixed y1, fixed *p_y_new + fixed dx_old = alp->x_current - endp->x_current; + fixed dx_den = dx_old + endp->x_next - alp->x_next; + +- if (dx_den <= dx_old) ++ if (dx_den <= dx_old || dx_den == 0) + return false; /* Intersection isn't possible. */ + dy = y1 - y; + if_debug3('F', "[F]cross: dy=%g, dx_old=%g, dx_new=%g\n", +@@ -1750,7 +1750,7 @@ intersect(active_line *endp, active_line *alp, fixed y, fixed y1, fixed *p_y_new + /* Do the computation in single precision */ + /* if the values are small enough. */ + y_new = +- ((dy | dx_old) < 1L << (size_of(fixed) * 4 - 1) ? ++ (((ufixed)(dy | dx_old)) < (1L << (size_of(fixed) * 4 - 1)) ? + dy * dx_old / dx_den : + (INCR_EXPR(mq_cross), fixed_mult_quo(dy, dx_old, dx_den))) + + y; +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2016-10220.patch b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2016-10220.patch new file mode 100644 index 000000000..5e1e8ba10 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2016-10220.patch @@ -0,0 +1,55 @@ +From daf85701dab05f17e924a48a81edc9195b4a04e8 Mon Sep 17 00:00:00 2001 +From: Ken Sharp <ken.sharp@artifex.com> +Date: Wed, 21 Dec 2016 16:54:14 +0000 +Subject: [PATCH] fix crash with bad data supplied to makeimagedevice + +Bug #697450 "Null pointer dereference in gx_device_finalize()" + +The problem here is that the code to finalise a device unconditionally +frees the icc_struct member of the device structure. However this +particular (weird) device is not setup as a normal device, probably +because its very, very ancient. Its possible for the initialisation +of the device to abort with an error before calling gs_make_mem_device() +which is where the icc_struct member gets allocated (or set to NULL). + +If that happens, then the cleanup code tries to free the device, which +calls finalize() which tries to free a garbage pointer. 
+ +Setting the device memory to 0x00 after we allocate it means that the +icc_struct member will be NULL< and our memory manager allows for that +happily enough, which avoids the problem. + +Upstream-Status: Backport +CVE: CVE-2016-10220 + +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + base/gsdevmem.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/base/gsdevmem.c b/base/gsdevmem.c +index 97b9cf4..fe75bcc 100644 +--- a/base/gsdevmem.c ++++ b/base/gsdevmem.c +@@ -225,6 +225,18 @@ gs_makewordimagedevice(gx_device ** pnew_dev, const gs_matrix * pmat, + + if (pnew == 0) + return_error(gs_error_VMerror); ++ ++ /* Bug #697450 "Null pointer dereference in gx_device_finalize()" ++ * If we have incorrect data passed to gs_initialise_wordimagedevice() then the ++ * initialisation will fail, crucially it will fail *before* it calls ++ * gs_make_mem_device() which initialises the device. This means that the ++ * icc_struct member will be uninitialsed, but the device finalise method ++ * will unconditionally free that memory. Since its a garbage pointer, bad things happen. ++ * Apparently we do still need makeimagedevice to be available from ++ * PostScript, so in here just zero the device memory, which means that ++ * the finalise routine won't have a problem. ++ */ ++ memset(pnew, 0x00, st_device_memory.ssize); + code = gs_initialize_wordimagedevice(pnew, pmat, width, height, + colors, num_colors, word_oriented, + page_device, mem); +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2017-5951.patch b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2017-5951.patch new file mode 100644 index 000000000..62cc1342a --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2017-5951.patch @@ -0,0 +1,44 @@ +From bfa6b2ecbe48edc69a7d9d22a12419aed25960b8 Mon Sep 17 00:00:00 2001 +From: Chris Liddell <chris.liddell@artifex.com> +Date: Thu, 6 Apr 2017 16:44:54 +0100 +Subject: [PATCH] Bug 697548: use the correct param list enumerator + +When we encountered dictionary in a ref_param_list, we were using the enumerator +for the "parent" param_list, rather than the enumerator for the param_list +we just created for the dictionary. That parent was usually the stack +list enumerator, and caused a segfault. + +Using the correct enumerator works better. 
+ +Upstream-Status: Backport +CVE: CVE-2017-5951 + +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + psi/iparam.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/psi/iparam.c b/psi/iparam.c +index 4e63b6d..b2fa85f 100644 +--- a/psi/iparam.c ++++ b/psi/iparam.c +@@ -770,12 +770,13 @@ ref_param_read_typed(gs_param_list * plist, gs_param_name pkey, + gs_param_enumerator_t enumr; + gs_param_key_t key; + ref_type keytype; ++ dict_param_list *dlist = (dict_param_list *) pvalue->value.d.list; + + param_init_enumerator(&enumr); +- if (!(*((iparam_list *) plist)->enumerate) +- ((iparam_list *) pvalue->value.d.list, &enumr, &key, &keytype) ++ if (!(*(dlist->enumerate)) ++ ((iparam_list *) dlist, &enumr, &key, &keytype) + && keytype == t_integer) { +- ((dict_param_list *) pvalue->value.d.list)->int_keys = 1; ++ dlist->int_keys = 1; + pvalue->type = gs_param_type_dict_int_keys; + } + } +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2017-7207.patch b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2017-7207.patch new file mode 100644 index 000000000..a05dc02c6 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2017-7207.patch @@ -0,0 +1,39 @@ +From 0e88bee1304993668fede72498d656a2dd33a35e Mon Sep 17 00:00:00 2001 +From: Ken Sharp <ken.sharp@artifex.com> +Date: Mon, 20 Mar 2017 09:34:11 +0000 +Subject: [PATCH] Ensure a device has raster memory, before trying to read it. + +Bug #697676 "Null pointer dereference in mem_get_bits_rectangle()" + +This is only possible by abusing/mis-using Ghostscript-specific +language extensions, so cannot happen in a general PostScript program. + +Nevertheless, Ghostscript should not crash. So this commit checks the +memory device to see if raster memory has been allocated, before trying +to read from it. 
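The one-line guard below is the standard check-before-dereference pattern for optional device state; in isolation it looks like this (editorial sketch with an invented mem_device struct, not Ghostscript code):

    #include <stdio.h>
    #include <stddef.h>

    struct mem_device {
        unsigned char **line_ptrs;   /* legitimately NULL when raster memory
                                        was never allocated */
    };

    /* Refuse the request instead of dereferencing state that was never set up. */
    static int get_bits(const struct mem_device *dev, int row)
    {
        if (dev->line_ptrs == NULL)
            return -1;               /* analogous to returning gs_error_rangecheck */
        printf("row %d starts at %p\n", row, (void *)dev->line_ptrs[row]);
        return 0;
    }

    int main(void)
    {
        struct mem_device dev = { NULL };

        printf("result: %d\n", get_bits(&dev, 0));   /* -1, no crash */
        return 0;
    }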
+ +Upstream-Status: Backport +CVE: CVE-2017-7207 + +Author: Ken Sharp <ken.sharp@artifex.com> +Signed-off-by: Catalin Enache <catalin.enache@windriver.com> +--- + base/gdevmem.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/base/gdevmem.c b/base/gdevmem.c +index 41108ba..183f96d 100644 +--- a/base/gdevmem.c ++++ b/base/gdevmem.c +@@ -605,6 +605,8 @@ mem_get_bits_rectangle(gx_device * dev, const gs_int_rect * prect, + GB_PACKING_CHUNKY | GB_COLORS_NATIVE | GB_ALPHA_NONE; + return_error(gs_error_rangecheck); + } ++ if (mdev->line_ptrs == 0x00) ++ return_error(gs_error_rangecheck); + if ((w <= 0) | (h <= 0)) { + if ((w | h) < 0) + return_error(gs_error_rangecheck); +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.19.bb b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.19.bb index fe2016b15..ab58157cd 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.19.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/ghostscript/ghostscript_9.19.bb @@ -30,6 +30,10 @@ SRC_URI = "${SRC_URI_BASE} \ file://ghostscript-9.02-genarch.patch \ file://objarch.h \ file://cups-no-gcrypt.patch \ + file://CVE-2017-7207.patch \ + file://CVE-2016-10219.patch \ + file://CVE-2016-10220.patch \ + file://CVE-2017-5951.patch \ " SRC_URI_class-native = "${SRC_URI_BASE} \ diff --git a/import-layers/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.2.1.bb b/import-layers/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.2.2.bb index b65b5df01..7917ce707 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.2.1.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/libarchive/libarchive_3.2.2.bb @@ -34,8 +34,8 @@ PACKAGECONFIG[lz4] = "--with-lz4,--without-lz4,lz4," SRC_URI = "http://libarchive.org/downloads/libarchive-${PV}.tar.gz \ " -SRC_URI[md5sum] = "afa257047d1941a565216edbf0171e72" -SRC_URI[sha256sum] = "72ee1a4e3fd534525f13a0ba1aa7b05b203d186e0c6072a8a4738649d0b3cfd2" +SRC_URI[md5sum] = "1ec00b7dcaf969dd2a5712f85f23c764" +SRC_URI[sha256sum] = "691c194ee132d1f0f7a42541f091db811bc2e56f7107e9121be2bc8c04f1060f" inherit autotools update-alternatives pkgconfig @@ -48,7 +48,7 @@ do_configure_prepend() { cp -R ${STAGING_INCDIR_NATIVE}/ext2fs ${WORKDIR}/extra-includes/ } -ALTERNATIVE_PRIORITY = "100" +ALTERNATIVE_PRIORITY = "80" PACKAGES =+ "bsdtar" FILES_bsdtar = "${bindir}/bsdtar" diff --git a/import-layers/yocto-poky/meta/recipes-extended/libuser/libuser_0.62.bb b/import-layers/yocto-poky/meta/recipes-extended/libuser/libuser_0.62.bb index 3d0b516ab..07028d55a 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/libuser/libuser_0.62.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/libuser/libuser_0.62.bb @@ -1,8 +1,8 @@ SUMMARY = "user and group account administration library" DESCRIPTION = "The libuser library implements a standardized interface for manipulating and administering user \ and group accounts" -HOMEPAGE = "https://fedorahosted.org/libuser/" -BUGTRACKER = "https://fedorahosted.org/libuser/newticket" +HOMEPAGE = "https://pagure.io/libuser" +BUGTRACKER = "https://pagure.io/libuser/issues" LICENSE = "LGPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2 \ @@ -11,7 +11,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2 \ SECTION = "base" -SRC_URI = "https://fedorahosted.org/releases/l/i/libuser/libuser-${PV}.tar.xz \ +SRC_URI = "https://releases.pagure.org/libuser/libuser-${PV}.tar.xz \ 
file://0001-Check-for-issetugid.patch \ file://0002-remove-unused-execinfo.h.patch \ file://0001-modules-files.c-parse_field-fix-string-formating-in-.patch \ diff --git a/import-layers/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb b/import-layers/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb index 5f1a601ae..5bd338117 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/logrotate/logrotate_3.9.1.bb @@ -1,6 +1,6 @@ SUMMARY = "Rotates, compresses, removes and mails system log files" SECTION = "console/utils" -HOMEPAGE = "https://fedorahosted.org/logrotate/" +HOMEPAGE = "https://github.com/logrotate/logrotate/issues" LICENSE = "GPLv2" # TODO: logrotate 3.8.8 adds autotools/automake support, update recipe to use it. @@ -10,14 +10,23 @@ DEPENDS="coreutils popt" LIC_FILES_CHKSUM = "file://COPYING;md5=18810669f13b87348459e611d31ab760" -SRC_URI = "https://fedorahosted.org/releases/l/o/logrotate/logrotate-${PV}.tar.gz \ +# When updating logrotate to latest upstream, SRC_URI should point to +# a proper release tarball from https://github.com/logrotate/logrotate/releases +# and we have to take the snapshot for now because there is no such +# tarball available for 3.9.1. + +S = "${WORKDIR}/${BPN}-r3-9-1" + +UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" + +SRC_URI = "https://github.com/${BPN}/${BPN}/archive/r3-9-1.tar.gz \ file://act-as-mv-when-rotate.patch \ file://update-the-manual.patch \ file://disable-check-different-filesystems.patch \ " -SRC_URI[md5sum] = "4492b145b6d542e4a2f41e77fa199ab0" -SRC_URI[sha256sum] = "022769e3288c80981559a8421703c88e8438b447235e36dd3c8e97cd94c52545" +SRC_URI[md5sum] = "8572b7c2cf9ade09a8a8e10098500fb3" +SRC_URI[sha256sum] = "5bf8e478c428e7744fefa465118f8296e7e771c981fb6dffb7527856a0ea3617" PACKAGECONFIG ?= "\ ${@bb.utils.contains('DISTRO_FEATURES', 'acl', 'acl', '', d)} \ diff --git a/import-layers/yocto-poky/meta/recipes-extended/lsof/lsof_4.89.bb b/import-layers/yocto-poky/meta/recipes-extended/lsof/lsof_4.89.bb index b732cf0ac..29245b1ab 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/lsof/lsof_4.89.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/lsof/lsof_4.89.bb @@ -1,20 +1,24 @@ SUMMARY = "LiSt Open Files tool" DESCRIPTION = "Lsof is a Unix-specific diagnostic tool. \ Its name stands for LiSt Open Files, and it does just that." +HOMEPAGE = "http://people.freebsd.org/~abe/" SECTION = "devel" LICENSE = "BSD" +LIC_FILES_CHKSUM = "file://00README;beginline=645;endline=679;md5=964df275d26429ba3b39dbb9f205172a" -SRC_URI = "ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/lsof_${PV}.tar.bz2" +# Upstream lsof releases are hosted on an ftp server which times out download +# attempts from hosts for which it can not perform a DNS reverse-lookup (See: +# https://people.freebsd.org/~abe/ ). http://www.mirrorservice.org seems to be +# the most commonly used alternative. 
+ +SRC_URI = "http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof/lsof_${PV}.tar.bz2" SRC_URI[md5sum] = "1b9cd34f3fb86856a125abbf2be3a386" SRC_URI[sha256sum] = "81ac2fc5fdc944793baf41a14002b6deb5a29096b387744e28f8c30a360a3718" -UPSTREAM_CHECK_URI = "http://www.mirrorservice.org/sites/lsof.itap.purdue.edu/pub/tools/unix/lsof" - LOCALSRC = "file://${WORKDIR}/lsof_${PV}/lsof_${PV}_src.tar" -S = "${WORKDIR}/lsof_${PV}_src" -LIC_FILES_CHKSUM = "file://${S}/00README;beginline=645;endline=679;md5=964df275d26429ba3b39dbb9f205172a" +S = "${WORKDIR}/lsof_${PV}_src" python do_unpack () { # temporarily change S for unpack @@ -36,11 +40,11 @@ export LSOF_INCLUDE = "${STAGING_INCDIR}" do_configure () { export LSOF_AR="${AR} cr" export LSOF_RANLIB="${RANLIB}" - if [ "x${GLIBCVERSION}" != "x" ];then - LINUX_CLIB=`echo ${GLIBCVERSION} |sed -e 's,\.,,g'` - LINUX_CLIB="-DGLIBCV=${LINUX_CLIB}" - export LINUX_CLIB - fi + if [ "x${GLIBCVERSION}" != "x" ]; then + LINUX_CLIB=`echo ${GLIBCVERSION} |sed -e 's,\.,,g'` + LINUX_CLIB="-DGLIBCV=${LINUX_CLIB}" + export LINUX_CLIB + fi yes | ./Configure linux } @@ -53,6 +57,6 @@ do_compile () { do_install () { install -d ${D}${sbindir} ${D}${mandir}/man8 - install -m 4755 lsof ${D}${sbindir}/lsof + install -m 0755 lsof ${D}${sbindir}/lsof install -m 0644 lsof.8 ${D}${mandir}/man8/lsof.8 } diff --git a/import-layers/yocto-poky/meta/recipes-extended/newt/libnewt_0.52.19.bb b/import-layers/yocto-poky/meta/recipes-extended/newt/libnewt_0.52.19.bb index a26ce1fbe..de76ce20c 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/newt/libnewt_0.52.19.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/newt/libnewt_0.52.19.bb @@ -8,7 +8,7 @@ shared library needed by programs built with newt, as well as a \ /usr/bin/dialog replacement called whiptail. Newt is based on the \ slang library." 
-HOMEPAGE = "https://fedorahosted.org/newt/" +HOMEPAGE = "https://releases.pagure.org/newt/" SECTION = "libs" LICENSE = "LGPLv2" @@ -17,7 +17,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2" # slang needs to be >= 2.2 DEPENDS = "slang popt" -SRC_URI = "https://fedorahosted.org/releases/n/e/newt/newt-${PV}.tar.gz \ +SRC_URI = "https://releases.pagure.org/newt/newt-${PV}.tar.gz \ file://fix_SHAREDDIR.patch \ file://cross_ar.patch \ file://Makefile.in-Add-tinfo-library-to-the-linking-librari.patch \ diff --git a/import-layers/yocto-poky/meta/recipes-extended/shadow/shadow.inc b/import-layers/yocto-poky/meta/recipes-extended/shadow/shadow.inc index 35a18f8ab..f79565b35 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/shadow/shadow.inc +++ b/import-layers/yocto-poky/meta/recipes-extended/shadow/shadow.inc @@ -180,11 +180,12 @@ ALTERNATIVE_${PN}-base = "newgrp groups login su" ALTERNATIVE_LINK_NAME[login] = "${base_bindir}/login" ALTERNATIVE_LINK_NAME[su] = "${base_bindir}/su" -ALTERNATIVE_${PN}-doc = "passwd.5 getspnam.3 groups.1 su.1" +ALTERNATIVE_${PN}-doc = "passwd.5 getspnam.3 groups.1 su.1 nologin.8" ALTERNATIVE_LINK_NAME[passwd.5] = "${mandir}/man5/passwd.5" ALTERNATIVE_LINK_NAME[getspnam.3] = "${mandir}/man3/getspnam.3" ALTERNATIVE_LINK_NAME[groups.1] = "${mandir}/man1/groups.1" ALTERNATIVE_LINK_NAME[su.1] = "${mandir}/man1/su.1" +ALTERNATIVE_LINK_NAME[nologin.8] = "${mandir}/man8/nologin.8" pkg_postinst_${PN} () { if [ "x$D" != "x" ]; then diff --git a/import-layers/yocto-poky/meta/recipes-extended/slang/slang/no-x.patch b/import-layers/yocto-poky/meta/recipes-extended/slang/slang/no-x.patch new file mode 100644 index 000000000..d7666bfc8 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/slang/slang/no-x.patch @@ -0,0 +1,14 @@ +There's no need to check for the X libraries as the socket module doesn't use +anything from X. + +Upstream-Status: Pending +Signed-off-by: Ross Burton <ross.burton@intel.com> + +diff --git a/autoconf/configure.ac b/autoconf/configure.ac +index b61e974..a3e5db2 100644 +--- a/autoconf/configure.ac ++++ b/autoconf/configure.ac +@@ -72,3 +71,0 @@ AC_SUBST(LIB_READLINE) +-# For the socket module +-AC_PATH_XTRA +- diff --git a/import-layers/yocto-poky/meta/recipes-extended/slang/slang_2.3.0.bb b/import-layers/yocto-poky/meta/recipes-extended/slang/slang_2.3.0.bb index 17efbbe22..d5967d25d 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/slang/slang_2.3.0.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/slang/slang_2.3.0.bb @@ -9,7 +9,7 @@ to recode S-Lang procedures in C if you need to." 
HOMEPAGE = "http://www.jedsoft.org/slang/" SECTION = "libs" -DEPENDS = "pcre ncurses" +DEPENDS = "ncurses virtual/libiconv" LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=a52a18a472d4f7e45479b06563717c02" @@ -20,17 +20,23 @@ SRC_URI = "http://www.jedsoft.org/releases/${BPN}/${BP}.tar.bz2 \ file://fix-check-pcre.patch \ file://slang-fix-the-iconv-existence-checking.patch \ file://0001-Fix-error-conflicting-types-for-posix_close.patch \ + file://no-x.patch \ " +SRC_URI[md5sum] = "3bcc790460d52db1316c20395b7ac2f1" +SRC_URI[sha256sum] = "f95224060f45e0d8212a5039b339afa5f1a94a1bb0298e796104e5b12e926129" + UPSTREAM_CHECK_URI = "http://www.jedsoft.org/releases/slang/" +PREMIRRORS_append = "\n http://www.jedsoft.org/releases/slang/.* http://www.jedsoft.org/releases/slang/old/ \n" inherit autotools-brokensep - CLEANBROKEN = "1" -SRC_URI[md5sum] = "3bcc790460d52db1316c20395b7ac2f1" -SRC_URI[sha256sum] = "f95224060f45e0d8212a5039b339afa5f1a94a1bb0298e796104e5b12e926129" +EXTRA_OECONF = "--without-onig" -EXTRA_OECONF += " --without-z --without-png --without-onig --x-includes=${STAGING_DIR_HOST}/usr/include/X11 --x-libraries=${STAGING_DIR_HOST}/usr/lib" +PACKAGECONFIG ??= "pcre" +PACKAGECONFIG[pcre] = "--with-pcre,--without-pcre,pcre" +PACKAGECONFIG[png] = "--with-png,--without-png,libpng" +PACKAGECONFIG[zlib] = "--with-z,--without-z,zlib" do_configure_prepend() { # slang keeps configure.ac and rest of autoconf files in autoconf/ directory @@ -47,5 +53,6 @@ do_install() { FILES_${PN} += "${libdir}/${BPN}/v2/modules/ ${datadir}/slsh/" PARALLEL_MAKE = "" +PARALLEL_MAKEINST = "" BBCLASSEXTEND = "native" diff --git a/import-layers/yocto-poky/meta/recipes-extended/tar/tar/CVE-2016-6321.patch b/import-layers/yocto-poky/meta/recipes-extended/tar/tar/CVE-2016-6321.patch new file mode 100644 index 000000000..6d35bcc51 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/tar/tar/CVE-2016-6321.patch @@ -0,0 +1,66 @@ +From 7340f67b9860ea0531c1450e5aa261c50f67165d Mon Sep 17 00:00:00 2001 +From: Paul Eggert <eggert@Penguin.CS.UCLA.EDU> +Date: Sat, 29 Oct 2016 21:04:40 -0700 +Subject: [PATCH] When extracting, skip ".." members + +* NEWS: Document this. +* src/extract.c (extract_archive): Skip members whose names +contain "..". + +CVE: CVE-2016-6321 +Upstream-Status: Backport + +Cherry picked from commit: 7340f67 When extracting, skip ".." members + +Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com> +--- + NEWS | 8 +++++++- + src/extract.c | 8 ++++++++ + 2 files changed, 15 insertions(+), 1 deletion(-) + +diff --git a/NEWS b/NEWS +index 501164a..fc97cfc 100644 +--- a/NEWS ++++ b/NEWS +@@ -1,6 +1,12 @@ +-GNU tar NEWS - User visible changes. 2016-05-16 ++GNU tar NEWS - User visible changes. 2016-10-29 + Please send GNU tar bug reports to <bug-tar@gnu.org> + ++* Member names containing '..' components are now skipped when extracting. ++ ++This fixes tar's behavior to match its documentation, and is a bit ++safer when extracting untrusted archives over old files (an unsafe ++practice that the tar manual has long recommended against). 
++ + + version 1.29 - Sergey Poznyakoff, 2016-05-16 + +diff --git a/src/extract.c b/src/extract.c +index f982433..7904148 100644 +--- a/src/extract.c ++++ b/src/extract.c +@@ -1629,12 +1629,20 @@ extract_archive (void) + { + char typeflag; + tar_extractor_t fun; ++ bool skip_dotdot_name; + + fatal_exit_hook = extract_finish; + + set_next_block_after (current_header); + ++ skip_dotdot_name = (!absolute_names_option ++ && contains_dot_dot (current_stat_info.orig_file_name)); ++ if (skip_dotdot_name) ++ ERROR ((0, 0, _("%s: Member name contains '..'"), ++ quotearg_colon (current_stat_info.orig_file_name))); ++ + if (!current_stat_info.file_name[0] ++ || skip_dotdot_name + || (interactive_option + && !confirm ("extract", current_stat_info.file_name))) + { +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-extended/tar/tar_1.29.bb b/import-layers/yocto-poky/meta/recipes-extended/tar/tar_1.29.bb index efce57d9d..f22d9c938 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/tar/tar_1.29.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/tar/tar_1.29.bb @@ -8,6 +8,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" SRC_URI += "file://remove-gets.patch \ file://musl_dirent.patch \ + file://CVE-2016-6321.patch \ " SRC_URI[md5sum] = "955cd533955acb1804b83fd70218da51" SRC_URI[sha256sum] = "236b11190c0a3a6885bdb8d61424f2b36a5872869aa3f7f695dea4b4843ae2f2" diff --git a/import-layers/yocto-poky/meta/recipes-extended/texi2html/files/0001-Allow-compiling-out-of-source.patch b/import-layers/yocto-poky/meta/recipes-extended/texi2html/files/0001-Allow-compiling-out-of-source.patch new file mode 100644 index 000000000..0cf025ff4 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-extended/texi2html/files/0001-Allow-compiling-out-of-source.patch @@ -0,0 +1,39 @@ +From: Olaf Mandel <o.mandel@menlosystems.com> +Date: Fri, 21 Oct 2016 13:04:44 +0000 +Subject: [PATCH] Allow compiling out-of-source + +Upstream-Status: Backport of [svn://svn.sv.gnu.org/texinfo/trunk r3602] +--- + Makefile.am | 2 +- + Makefile.in | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/Makefile.am b/Makefile.am +index 3447463..c9b5b5c 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -208,7 +208,7 @@ i18n/en.thl i18n/: $(po_document_dir)/po_document/$(PACKAGE)_document.pot + done; \ + msgexec -i $< "$(srcdir)/gettext_to_separated.pl" | "$(srcdir)/separated_to_hash.pl" en > i18n/en.thl; \ + else \ +- cp -p i18n_ref/*.thl i18n; \ ++ cp -p "$(srcdir)/i18n_ref/"*.thl i18n; \ + fi + + i18n_ref: +diff --git a/Makefile.in b/Makefile.in +index 4264b37..a13f84d 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -1126,7 +1126,7 @@ i18n/en.thl i18n/: $(po_document_dir)/po_document/$(PACKAGE)_document.pot + done; \ + msgexec -i $< "$(srcdir)/gettext_to_separated.pl" | "$(srcdir)/separated_to_hash.pl" en > i18n/en.thl; \ + else \ +- cp -p i18n_ref/*.thl i18n; \ ++ cp -p "$(srcdir)/i18n_ref/"*.thl i18n; \ + fi + + i18n_ref: +-- +2.1.4 + diff --git a/import-layers/yocto-poky/meta/recipes-extended/texi2html/texi2html_5.0.bb b/import-layers/yocto-poky/meta/recipes-extended/texi2html/texi2html_5.0.bb index eac289e3b..ae64816f8 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/texi2html/texi2html_5.0.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/texi2html/texi2html_5.0.bb @@ -7,6 +7,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552" PR = "r2" SRC_URI = "${SAVANNAH_GNU_MIRROR}/texi2html/${BPN}-${PV}.tar.bz2 \ + 
file://0001-Allow-compiling-out-of-source.patch \ " SRC_URI[md5sum] = "f15ac876fcdc8be865b16535f480aa54" diff --git a/import-layers/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2016g.bb b/import-layers/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2017a.bb index a2e621741..2c26744f3 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2016g.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/tzcode/tzcode-native_2017a.bb @@ -9,15 +9,17 @@ SRC_URI =" http://www.iana.org/time-zones/repository/releases/tzcode${PV}.tar.gz http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata" UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones" -SRC_URI[tzcode.md5sum] = "f89867013676e3cb9544be2df7d36a91" -SRC_URI[tzcode.sha256sum] = "1ff90b47ad7986140a513b5287b1851c40f80fd44fd636db5cc5b46d06f9fa2b" -SRC_URI[tzdata.md5sum] = "3c7e97ec8527211104d27cc1d97a23de" -SRC_URI[tzdata.sha256sum] = "3c7137b2bc47323b0de47b77786bacf81ed503d4b2c693ff8ada2fbd1281ebd1" +SRC_URI[tzcode.md5sum] = "eef0bfac7a52dce6989a7d8b40d86fe0" +SRC_URI[tzcode.sha256sum] = "02f2c6b58b99edd0d47f0cad34075b359fd1a4dab71850f493b0404ded3b38ac" +SRC_URI[tzdata.md5sum] = "cb8274cd175f8a4d9d1b89895df876dc" +SRC_URI[tzdata.sha256sum] = "df3a5c4d0a2cf0cde0b3f35796ccf6c9acfd598b8e70f8dece5404cd7626bbd6" S = "${WORKDIR}" inherit native +EXTRA_OEMAKE += "cc=${CC}" + do_install () { install -d ${D}${bindir}/ install -m 755 zic ${D}${bindir}/ diff --git a/import-layers/yocto-poky/meta/recipes-extended/tzdata/tzdata_2016g.bb b/import-layers/yocto-poky/meta/recipes-extended/tzdata/tzdata_2017a.bb index 3ee4b5af6..ce59d7102 100644 --- a/import-layers/yocto-poky/meta/recipes-extended/tzdata/tzdata_2016g.bb +++ b/import-layers/yocto-poky/meta/recipes-extended/tzdata/tzdata_2017a.bb @@ -9,8 +9,8 @@ DEPENDS = "tzcode-native" SRC_URI = "http://www.iana.org/time-zones/repository/releases/tzdata${PV}.tar.gz;name=tzdata" UPSTREAM_CHECK_URI = "http://www.iana.org/time-zones" -SRC_URI[tzdata.md5sum] = "3c7e97ec8527211104d27cc1d97a23de" -SRC_URI[tzdata.sha256sum] = "3c7137b2bc47323b0de47b77786bacf81ed503d4b2c693ff8ada2fbd1281ebd1" +SRC_URI[tzdata.md5sum] = "cb8274cd175f8a4d9d1b89895df876dc" +SRC_URI[tzdata.sha256sum] = "df3a5c4d0a2cf0cde0b3f35796ccf6c9acfd598b8e70f8dece5404cd7626bbd6" inherit allarch diff --git a/import-layers/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.20.2.bb b/import-layers/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.20.2.bb index eb061b20e..6ef704970 100644 --- a/import-layers/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.20.2.bb +++ b/import-layers/yocto-poky/meta/recipes-gnome/gnome-desktop/gnome-desktop3_3.20.2.bb @@ -10,7 +10,7 @@ inherit gnome pkgconfig upstream-version-is-even gobject-introspection SRC_URI[archive.md5sum] = "8354ed38624f5eb6b5d34267b658a9c9" SRC_URI[archive.sha256sum] = "492c2da7aa8c3a8b65796e8171fc8f0dfb5d322dd2799c0d76392e1fb061e2b2" -DEPENDS += "intltool-native gsettings-desktop-schemas gconf libxrandr virtual/libx11 gtk+3 glib-2.0 startup-notification xkeyboard-config iso-codes" +DEPENDS += "intltool-native gsettings-desktop-schemas gconf virtual/libx11 gtk+3 glib-2.0 startup-notification xkeyboard-config iso-codes udev" inherit distro_features_check gtk-doc REQUIRED_DISTRO_FEATURES = "x11" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/mesa/mesa.inc b/import-layers/yocto-poky/meta/recipes-graphics/mesa/mesa.inc index e4880ffed..525a2d54c 100644 --- 
a/import-layers/yocto-poky/meta/recipes-graphics/mesa/mesa.inc +++ b/import-layers/yocto-poky/meta/recipes-graphics/mesa/mesa.inc @@ -79,6 +79,9 @@ EXCLUDE_FROM_WORLD = "1" # Remove the mesa dependency on mesa-dev, as mesa is empty RDEPENDS_${PN}-dev = "" +# Add dependency so that GLES3 header don't need to be added manually +RDEPENDS_libgles2-mesa-dev += "libgles3-mesa-dev" + PACKAGES =+ "libegl-mesa libegl-mesa-dev \ libosmesa libosmesa-dev \ libgl-mesa libgl-mesa-dev \ diff --git a/import-layers/yocto-poky/meta/recipes-graphics/ttf-fonts/liberation-fonts_1.04.bb b/import-layers/yocto-poky/meta/recipes-graphics/ttf-fonts/liberation-fonts_1.04.bb index bda82e7ef..74212e7e5 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/ttf-fonts/liberation-fonts_1.04.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/ttf-fonts/liberation-fonts_1.04.bb @@ -2,7 +2,7 @@ SUMMARY = "Liberation(tm) Fonts" DESCRIPTION = "The Liberation(tm) Fonts is a font family originally \ created by Ascender(c) which aims at metric compatibility with \ Arial, Times New Roman, Courier New." -HOMEPAGE = "https://fedorahosted.org/liberation-fonts/" +HOMEPAGE = "https://releases.pagure.org/liberation-fonts/" BUGTRACKER = "https://bugzilla.redhat.com/" RECIPE_NO_UPDATE_REASON = "2.x depends on fontforge package, which is not yet provided in oe-core" @@ -17,7 +17,7 @@ inherit allarch fontcache FONT_PACKAGES = "${PN}" -SRC_URI = "https://fedorahosted.org/releases/l/i/liberation-fonts/liberation-fonts-${PV}.tar.gz \ +SRC_URI = "https://releases.pagure.org/liberation-fonts/liberation-fonts-${PV}.tar.gz \ file://30-liberation-aliases.conf" SRC_URI[md5sum] = "4846797ef0fc70b0cbaede2514677c58" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/wayland/weston/weston-1.11-config-option-for-no-input-device.patch b/import-layers/yocto-poky/meta/recipes-graphics/wayland/weston/weston-1.11-config-option-for-no-input-device.patch new file mode 100644 index 000000000..6f5ad6652 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-graphics/wayland/weston/weston-1.11-config-option-for-no-input-device.patch @@ -0,0 +1,123 @@ +From bbf2e6ebbd9c051775f43e1e3c3a2f41322342e8 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Daniel=20D=C3=ADaz?= <daniel.diaz@linaro.org> +Date: Fri, 21 Oct 2016 14:03:13 -0500 +Subject: [PATCH] Add configuration option for no input device. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +[Backported from master, 75b7197.] + +As it has been discussed in the past [1], running Weston +without any input device at launch might be beneficial for +some use cases. + +Certainly, it's best for the vast majority of users (and +the project) to require an input device to be present, as +to avoid frustration and hassle, but for those brave souls +that so prefer, this patch lets them run without any input +device at all. + +This introduces a simple configuration in weston.ini: + [core] + require-input=true + +True is the default, so no behavioral change is introduced. 
+ +[1] https://lists.freedesktop.org/archives/wayland-devel/2015-November/025193.html + +Signed-off-by: Daniel DĂaz <daniel.diaz@linaro.org> +--- + man/weston.ini.man | 5 +++++ + src/compositor.h | 4 ++++ + src/libinput-seat.c | 6 ++++++ + src/main.c | 5 +++++ + weston.ini.in | 1 + + 5 files changed, 21 insertions(+) + +diff --git a/man/weston.ini.man b/man/weston.ini.man +index d7c4a6f..c7d0f01 100644 +--- a/man/weston.ini.man ++++ b/man/weston.ini.man +@@ -169,6 +169,11 @@ time, the one specified in the command-line will be used. On the other + hand, if none of these sets the value, default idle timeout will be + set to 300 seconds. + .RS ++.PP ++.RE ++.TP 7 ++.BI "require-input=" true ++require an input device for launch + + .SH "LIBINPUT SECTION" + The +diff --git a/src/compositor.h b/src/compositor.h +index 0bbf458..476b650 100644 +--- a/src/compositor.h ++++ b/src/compositor.h +@@ -803,6 +803,10 @@ struct weston_compositor { + + void *user_data; + void (*exit)(struct weston_compositor *c); ++ ++ /* Whether to let the compositor run without any input device. */ ++ bool require_input; ++ + }; + + struct weston_buffer { +diff --git a/src/libinput-seat.c b/src/libinput-seat.c +index 8ce0ee0..e1fdcf0 100644 +--- a/src/libinput-seat.c ++++ b/src/libinput-seat.c +@@ -255,6 +255,12 @@ udev_input_enable(struct udev_input *input) + devices_found = 1; + } + ++ if (devices_found == 0 && !c->require_input) { ++ weston_log("warning: no input devices found, but none required " ++ "as per configuration.\n"); ++ return 0; ++ } ++ + if (devices_found == 0) { + weston_log( + "warning: no input devices on entering Weston. " +diff --git a/src/main.c b/src/main.c +index 3279ac6..09905ea 100644 +--- a/src/main.c ++++ b/src/main.c +@@ -1298,6 +1298,7 @@ int main(int argc, char *argv[]) + struct wl_client *primary_client; + struct wl_listener primary_client_destroyed; + struct weston_seat *seat; ++ int require_input; + + const struct weston_option core_options[] = { + { WESTON_OPTION_STRING, "backend", 'B', &backend }, +@@ -1373,6 +1374,10 @@ int main(int argc, char *argv[]) + if (weston_compositor_init_config(ec, config) < 0) + goto out; + ++ weston_config_section_get_bool(section, "require-input", ++ &require_input, true); ++ ec->require_input = require_input; ++ + if (load_backend(ec, backend, &argc, argv, config) < 0) { + weston_log("fatal: failed to create compositor backend\n"); + goto out; +diff --git a/weston.ini.in b/weston.ini.in +index 14a4c0c..d837fb5 100644 +--- a/weston.ini.in ++++ b/weston.ini.in +@@ -2,6 +2,7 @@ + #modules=xwayland.so,cms-colord.so + #shell=desktop-shell.so + #gbm-format=xrgb2101010 ++#require-input=true + + [shell] + background-image=/usr/share/backgrounds/gnome/Aqua.jpg +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-graphics/wayland/weston_1.11.0.bb b/import-layers/yocto-poky/meta/recipes-graphics/wayland/weston_1.11.0.bb index 3ad309dab..9740ce9c9 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/wayland/weston_1.11.0.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/wayland/weston_1.11.0.bb @@ -14,6 +14,7 @@ SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \ file://xwayland.weston-start \ file://make-weston-launch-exit-for-unrecognized-option.patch \ file://0001-weston-launch-Provide-a-default-version-that-doesn-t.patch \ + file://weston-1.11-config-option-for-no-input-device.patch \ " SRC_URI[md5sum] = "bc6f90a2039163804aecfa663b69c4c2" SRC_URI[sha256sum] = 
"05e086e9f186a06843b9f7a5e1abf19347b1a6e4be26d7e74927abc17b6b7125" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontdir_1.0.7.bb b/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontdir_1.0.7.bb index a453e2487..737bc9de4 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontdir_1.0.7.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontdir_1.0.7.bb @@ -11,6 +11,9 @@ files." PE = "1" PR = "${INC_PR}.0" +DEPENDS = "util-macros-native" +REQUIRED_DISTRO_FEATURES_class-native = "" + RDEPENDS_${PN} += "mkfontscale" RDEPENDS_${PN}_class-native += "mkfontscale-native" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontscale_1.1.2.bb b/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontscale_1.1.2.bb index 31cf18676..d1aa9b361 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontscale_1.1.2.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-app/mkfontscale_1.1.2.bb @@ -10,6 +10,8 @@ is used by the mkfontdir program." DEPENDS = "util-macros-native zlib libfontenc freetype xproto" +REQUIRED_DISTRO_FEATURES_class-native = "" + BBCLASSEXTEND = "native" LIC_FILES_CHKSUM = "file://COPYING;md5=2e0d129d05305176d1a790e0ac1acb7f" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-font/xorg-font-common.inc b/import-layers/yocto-poky/meta/recipes-graphics/xorg-font/xorg-font-common.inc index d5267f59a..da20ab9b5 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/xorg-font/xorg-font-common.inc +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-font/xorg-font-common.inc @@ -18,6 +18,7 @@ inherit autotools pkgconfig distro_features_check # The mkfontscale-native requires x11 in DISTRO_FEATURES REQUIRED_DISTRO_FEATURES = "x11" +REQUIRED_DISTRO_FEATURES_class-native = "" EXTRA_OEMAKE += "FCCACHE=/bin/true UTIL_DIR=${STAGING_DIR_TARGET}\$\(MAPFILES_PATH\)" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11/CVE-2016-7942.patch b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11/CVE-2016-7942.patch new file mode 100644 index 000000000..f5b4d69d4 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11/CVE-2016-7942.patch @@ -0,0 +1,69 @@ +From 8ea762f94f4c942d898fdeb590a1630c83235c17 Mon Sep 17 00:00:00 2001 +From: Tobias Stoeckmann <tobias@stoeckmann.org> +Date: Sun, 25 Sep 2016 21:25:25 +0200 +Subject: Validation of server responses in XGetImage() + +Check if enough bytes were received for specified image type and +geometry. Otherwise GetPixel and other functions could trigger an +out of boundary read later on. 
+ +CVE: CVE-2016-7942 +Upstream-Status: Backport + +Signed-off-by: Tobias Stoeckmann <tobias@stoeckmann.org> +Reviewed-by: Matthieu Herrb <matthieu@herrb.eu> +Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com> + +diff --git a/src/GetImage.c b/src/GetImage.c +index c461abc..ff32d58 100644 +--- a/src/GetImage.c ++++ b/src/GetImage.c +@@ -59,6 +59,7 @@ XImage *XGetImage ( + char *data; + unsigned long nbytes; + XImage *image; ++ int planes; + LockDisplay(dpy); + GetReq (GetImage, req); + /* +@@ -91,18 +92,28 @@ XImage *XGetImage ( + return (XImage *) NULL; + } + _XReadPad (dpy, data, nbytes); +- if (format == XYPixmap) +- image = XCreateImage(dpy, _XVIDtoVisual(dpy, rep.visual), +- Ones (plane_mask & +- (((unsigned long)0xFFFFFFFF) >> (32 - rep.depth))), +- format, 0, data, width, height, dpy->bitmap_pad, 0); +- else /* format == ZPixmap */ +- image = XCreateImage (dpy, _XVIDtoVisual(dpy, rep.visual), +- rep.depth, ZPixmap, 0, data, width, height, +- _XGetScanlinePad(dpy, (int) rep.depth), 0); ++ if (format == XYPixmap) { ++ image = XCreateImage(dpy, _XVIDtoVisual(dpy, rep.visual), ++ Ones (plane_mask & ++ (((unsigned long)0xFFFFFFFF) >> (32 - rep.depth))), ++ format, 0, data, width, height, dpy->bitmap_pad, 0); ++ planes = image->depth; ++ } else { /* format == ZPixmap */ ++ image = XCreateImage (dpy, _XVIDtoVisual(dpy, rep.visual), ++ rep.depth, ZPixmap, 0, data, width, height, ++ _XGetScanlinePad(dpy, (int) rep.depth), 0); ++ planes = 1; ++ } + + if (!image) + Xfree(data); ++ if (planes < 1 || image->height < 1 || image->bytes_per_line < 1 || ++ INT_MAX / image->height <= image->bytes_per_line || ++ INT_MAX / planes <= image->height * image->bytes_per_line || ++ nbytes < planes * image->height * image->bytes_per_line) { ++ XDestroyImage(image); ++ image = NULL; ++ } + UnlockDisplay(dpy); + SyncHandle(); + return (image); +-- +cgit v0.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11/CVE-2016-7943.patch b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11/CVE-2016-7943.patch new file mode 100644 index 000000000..50024236d --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11/CVE-2016-7943.patch @@ -0,0 +1,103 @@ +From 8c29f1607a31dac0911e45a0dd3d74173822b3c9 Mon Sep 17 00:00:00 2001 +From: Tobias Stoeckmann <tobias@stoeckmann.org> +Date: Sun, 25 Sep 2016 21:22:57 +0200 +Subject: The validation of server responses avoids out of boundary accesses. + +v2: FontNames.c return a NULL list whenever a single +length field from the server is incohent. + +CVE: CVE-2016-7943 +Upstream-Status: Backport + +Signed-off-by: Tobias Stoeckmann <tobias@stoeckmann.org> +Reviewed-by: Matthieu Herrb <matthieu@herrb.eu> +Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com> + +diff --git a/src/FontNames.c b/src/FontNames.c +index 21dcafe..e55f338 100644 +--- a/src/FontNames.c ++++ b/src/FontNames.c +@@ -66,7 +66,7 @@ int *actualCount) /* RETURN */ + + if (rep.nFonts) { + flist = Xmalloc (rep.nFonts * sizeof(char *)); +- if (rep.length < (INT_MAX >> 2)) { ++ if (rep.length > 0 && rep.length < (INT_MAX >> 2)) { + rlen = rep.length << 2; + ch = Xmalloc(rlen + 1); + /* +1 to leave room for last null-terminator */ +@@ -93,11 +93,22 @@ int *actualCount) /* RETURN */ + if (ch + length < chend) { + flist[i] = ch + 1; /* skip over length */ + ch += length + 1; /* find next length ... 
*/ +- length = *(unsigned char *)ch; +- *ch = '\0'; /* and replace with null-termination */ +- count++; +- } else +- flist[i] = NULL; ++ if (ch <= chend) { ++ length = *(unsigned char *)ch; ++ *ch = '\0'; /* and replace with null-termination */ ++ count++; ++ } else { ++ Xfree(flist); ++ flist = NULL; ++ count = 0; ++ break; ++ } ++ } else { ++ Xfree(flist); ++ flist = NULL; ++ count = 0; ++ break; ++ } + } + } + *actualCount = count; +diff --git a/src/ListExt.c b/src/ListExt.c +index be6b989..0516e45 100644 +--- a/src/ListExt.c ++++ b/src/ListExt.c +@@ -55,7 +55,7 @@ char **XListExtensions( + + if (rep.nExtensions) { + list = Xmalloc (rep.nExtensions * sizeof (char *)); +- if (rep.length < (INT_MAX >> 2)) { ++ if (rep.length > 0 && rep.length < (INT_MAX >> 2)) { + rlen = rep.length << 2; + ch = Xmalloc (rlen + 1); + /* +1 to leave room for last null-terminator */ +@@ -80,9 +80,13 @@ char **XListExtensions( + if (ch + length < chend) { + list[i] = ch+1; /* skip over length */ + ch += length + 1; /* find next length ... */ +- length = *ch; +- *ch = '\0'; /* and replace with null-termination */ +- count++; ++ if (ch <= chend) { ++ length = *ch; ++ *ch = '\0'; /* and replace with null-termination */ ++ count++; ++ } else { ++ list[i] = NULL; ++ } + } else + list[i] = NULL; + } +diff --git a/src/ModMap.c b/src/ModMap.c +index a809aa2..49a5d08 100644 +--- a/src/ModMap.c ++++ b/src/ModMap.c +@@ -42,7 +42,8 @@ XGetModifierMapping(register Display *dpy) + GetEmptyReq(GetModifierMapping, req); + (void) _XReply (dpy, (xReply *)&rep, 0, xFalse); + +- if (rep.length < (INT_MAX >> 2)) { ++ if (rep.length < (INT_MAX >> 2) && ++ (rep.length >> 1) == rep.numKeyPerModifier) { + nbytes = (unsigned long)rep.length << 2; + res = Xmalloc(sizeof (XModifierKeymap)); + if (res) +-- +cgit v0.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11_1.6.3.bb b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11_1.6.3.bb index 8e531c745..23a77891e 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11_1.6.3.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libx11_1.6.3.bb @@ -5,6 +5,8 @@ BBCLASSEXTEND = "native nativesdk" SRC_URI += "file://disable_tests.patch \ file://libX11-Add-missing-NULL-check.patch \ + file://CVE-2016-7942.patch \ + file://CVE-2016-7943.patch \ " SRC_URI[md5sum] = "2e36b73f8a42143142dda8129f02e4e0" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrandr/CVE-2016-7947_CVE-2016-7948.patch b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrandr/CVE-2016-7947_CVE-2016-7948.patch new file mode 100644 index 000000000..a9b3dbc65 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrandr/CVE-2016-7947_CVE-2016-7948.patch @@ -0,0 +1,439 @@ +From a0df3e1c7728205e5c7650b2e6dce684139254a6 Mon Sep 17 00:00:00 2001 +From: Tobias Stoeckmann <tobias@stoeckmann.org> +Date: Sun, 25 Sep 2016 22:21:40 +0200 +Subject: Avoid out of boundary accesses on illegal responses + +The responses of the connected X server have to be properly checked +to avoid out of boundary accesses that could otherwise be triggered +by a malicious server. 
+ +CVE: CVE-2016-7947 +libXrandr: Insufficient validation of server responses result in Integer overflows + +CVE: CVE-2016-7948 +libXrandr: Insufficient validation of server responses result in various data mishandlings + +Upstream-Status: Backport + +Signed-off-by: Tobias Stoeckmann <tobias@stoeckmann.org> +Reviewed-by: Matthieu Herrb <matthieu@herrb.eu> +Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com> + +diff --git a/src/XrrConfig.c b/src/XrrConfig.c +index 2f0282b..e68c45a 100644 +--- a/src/XrrConfig.c ++++ b/src/XrrConfig.c +@@ -29,6 +29,7 @@ + #include <config.h> + #endif + ++#include <limits.h> + #include <stdio.h> + #include <X11/Xlib.h> + /* we need to be able to manipulate the Display structure on events */ +@@ -272,23 +273,30 @@ static XRRScreenConfiguration *_XRRGetScreenInfo (Display *dpy, + rep.rate = 0; + rep.nrateEnts = 0; + } ++ if (rep.length < INT_MAX >> 2) { ++ nbytes = (long) rep.length << 2; + +- nbytes = (long) rep.length << 2; ++ nbytesRead = (long) (rep.nSizes * SIZEOF (xScreenSizes) + ++ ((rep.nrateEnts + 1)& ~1) * 2 /* SIZEOF(CARD16) */); + +- nbytesRead = (long) (rep.nSizes * SIZEOF (xScreenSizes) + +- ((rep.nrateEnts + 1)& ~1) * 2 /* SIZEOF (CARD16) */); ++ /* ++ * first we must compute how much space to allocate for ++ * randr library's use; we'll allocate the structures in a single ++ * allocation, on cleanlyness grounds. ++ */ + +- /* +- * first we must compute how much space to allocate for +- * randr library's use; we'll allocate the structures in a single +- * allocation, on cleanlyness grounds. +- */ ++ rbytes = sizeof (XRRScreenConfiguration) + ++ (rep.nSizes * sizeof (XRRScreenSize) + ++ rep.nrateEnts * sizeof (int)); + +- rbytes = sizeof (XRRScreenConfiguration) + +- (rep.nSizes * sizeof (XRRScreenSize) + +- rep.nrateEnts * sizeof (int)); ++ scp = (struct _XRRScreenConfiguration *) Xmalloc(rbytes); ++ } else { ++ nbytes = 0; ++ nbytesRead = 0; ++ rbytes = 0; ++ scp = NULL; ++ } + +- scp = (struct _XRRScreenConfiguration *) Xmalloc(rbytes); + if (scp == NULL) { + _XEatData (dpy, (unsigned long) nbytes); + return NULL; +diff --git a/src/XrrCrtc.c b/src/XrrCrtc.c +index 5ae35c5..6665092 100644 +--- a/src/XrrCrtc.c ++++ b/src/XrrCrtc.c +@@ -24,6 +24,7 @@ + #include <config.h> + #endif + ++#include <limits.h> + #include <stdio.h> + #include <X11/Xlib.h> + /* we need to be able to manipulate the Display structure on events */ +@@ -57,22 +58,33 @@ XRRGetCrtcInfo (Display *dpy, XRRScreenResources *resources, RRCrtc crtc) + return NULL; + } + +- nbytes = (long) rep.length << 2; ++ if (rep.length < INT_MAX >> 2) ++ { ++ nbytes = (long) rep.length << 2; + +- nbytesRead = (long) (rep.nOutput * 4 + +- rep.nPossibleOutput * 4); ++ nbytesRead = (long) (rep.nOutput * 4 + ++ rep.nPossibleOutput * 4); + +- /* +- * first we must compute how much space to allocate for +- * randr library's use; we'll allocate the structures in a single +- * allocation, on cleanlyness grounds. +- */ ++ /* ++ * first we must compute how much space to allocate for ++ * randr library's use; we'll allocate the structures in a single ++ * allocation, on cleanlyness grounds. 
++ */ + +- rbytes = (sizeof (XRRCrtcInfo) + +- rep.nOutput * sizeof (RROutput) + +- rep.nPossibleOutput * sizeof (RROutput)); ++ rbytes = (sizeof (XRRCrtcInfo) + ++ rep.nOutput * sizeof (RROutput) + ++ rep.nPossibleOutput * sizeof (RROutput)); ++ ++ xci = (XRRCrtcInfo *) Xmalloc(rbytes); ++ } ++ else ++ { ++ nbytes = 0; ++ nbytesRead = 0; ++ rbytes = 0; ++ xci = NULL; ++ } + +- xci = (XRRCrtcInfo *) Xmalloc(rbytes); + if (xci == NULL) { + _XEatDataWords (dpy, rep.length); + UnlockDisplay (dpy); +@@ -194,12 +206,21 @@ XRRGetCrtcGamma (Display *dpy, RRCrtc crtc) + if (!_XReply (dpy, (xReply *) &rep, 0, xFalse)) + goto out; + +- nbytes = (long) rep.length << 2; ++ if (rep.length < INT_MAX >> 2) ++ { ++ nbytes = (long) rep.length << 2; + +- /* three channels of CARD16 data */ +- nbytesRead = (rep.size * 2 * 3); ++ /* three channels of CARD16 data */ ++ nbytesRead = (rep.size * 2 * 3); + +- crtc_gamma = XRRAllocGamma (rep.size); ++ crtc_gamma = XRRAllocGamma (rep.size); ++ } ++ else ++ { ++ nbytes = 0; ++ nbytesRead = 0; ++ crtc_gamma = NULL; ++ } + + if (!crtc_gamma) + { +@@ -357,7 +378,7 @@ XRRGetCrtcTransform (Display *dpy, + xRRGetCrtcTransformReq *req; + int major_version, minor_version; + XRRCrtcTransformAttributes *attr; +- char *extra = NULL, *e; ++ char *extra = NULL, *end = NULL, *e; + int p; + + *attributes = NULL; +@@ -395,9 +416,17 @@ XRRGetCrtcTransform (Display *dpy, + else + { + int extraBytes = rep.length * 4 - CrtcTransformExtra; +- extra = Xmalloc (extraBytes); ++ if (rep.length < INT_MAX / 4 && ++ rep.length * 4 >= CrtcTransformExtra) { ++ extra = Xmalloc (extraBytes); ++ end = extra + extraBytes; ++ } else ++ extra = NULL; + if (!extra) { +- _XEatDataWords (dpy, rep.length - (CrtcTransformExtra >> 2)); ++ if (rep.length > (CrtcTransformExtra >> 2)) ++ _XEatDataWords (dpy, rep.length - (CrtcTransformExtra >> 2)); ++ else ++ _XEatDataWords (dpy, rep.length); + UnlockDisplay (dpy); + SyncHandle (); + return False; +@@ -429,22 +458,38 @@ XRRGetCrtcTransform (Display *dpy, + + e = extra; + ++ if (e + rep.pendingNbytesFilter > end) { ++ XFree (extra); ++ return False; ++ } + memcpy (attr->pendingFilter, e, rep.pendingNbytesFilter); + attr->pendingFilter[rep.pendingNbytesFilter] = '\0'; + e += (rep.pendingNbytesFilter + 3) & ~3; + for (p = 0; p < rep.pendingNparamsFilter; p++) { + INT32 f; ++ if (e + 4 > end) { ++ XFree (extra); ++ return False; ++ } + memcpy (&f, e, 4); + e += 4; + attr->pendingParams[p] = (XFixed) f; + } + attr->pendingNparams = rep.pendingNparamsFilter; + ++ if (e + rep.currentNbytesFilter > end) { ++ XFree (extra); ++ return False; ++ } + memcpy (attr->currentFilter, e, rep.currentNbytesFilter); + attr->currentFilter[rep.currentNbytesFilter] = '\0'; + e += (rep.currentNbytesFilter + 3) & ~3; + for (p = 0; p < rep.currentNparamsFilter; p++) { + INT32 f; ++ if (e + 4 > end) { ++ XFree (extra); ++ return False; ++ } + memcpy (&f, e, 4); + e += 4; + attr->currentParams[p] = (XFixed) f; +diff --git a/src/XrrMonitor.c b/src/XrrMonitor.c +index a9eaa7b..adc5330 100644 +--- a/src/XrrMonitor.c ++++ b/src/XrrMonitor.c +@@ -24,6 +24,7 @@ + #include <config.h> + #endif + ++#include <limits.h> + #include <stdio.h> + #include <X11/Xlib.h> + /* we need to be able to manipulate the Display structure on events */ +@@ -65,6 +66,15 @@ XRRGetMonitors(Display *dpy, Window window, Bool get_active, int *nmonitors) + return NULL; + } + ++ if (rep.length > INT_MAX >> 2 || ++ rep.nmonitors > INT_MAX / SIZEOF(xRRMonitorInfo) || ++ rep.noutputs > INT_MAX / 4 || ++ rep.nmonitors * 
SIZEOF(xRRMonitorInfo) > INT_MAX - rep.noutputs * 4) { ++ _XEatData (dpy, rep.length); ++ UnlockDisplay (dpy); ++ SyncHandle (); ++ return NULL; ++ } + nbytes = (long) rep.length << 2; + nmon = rep.nmonitors; + noutput = rep.noutputs; +@@ -111,6 +121,14 @@ XRRGetMonitors(Display *dpy, Window window, Bool get_active, int *nmonitors) + mon[m].outputs = output; + buf += SIZEOF (xRRMonitorInfo); + xoutput = (CARD32 *) buf; ++ if (xmon->noutput > rep.noutputs) { ++ Xfree(buf); ++ Xfree(mon); ++ UnlockDisplay (dpy); ++ SyncHandle (); ++ return NULL; ++ } ++ rep.noutputs -= xmon->noutput; + for (o = 0; o < xmon->noutput; o++) + output[o] = xoutput[o]; + output += xmon->noutput; +diff --git a/src/XrrOutput.c b/src/XrrOutput.c +index 85f0b6e..30f3d40 100644 +--- a/src/XrrOutput.c ++++ b/src/XrrOutput.c +@@ -25,6 +25,7 @@ + #include <config.h> + #endif + ++#include <limits.h> + #include <stdio.h> + #include <X11/Xlib.h> + /* we need to be able to manipulate the Display structure on events */ +@@ -60,6 +61,16 @@ XRRGetOutputInfo (Display *dpy, XRRScreenResources *resources, RROutput output) + return NULL; + } + ++ if (rep.length > INT_MAX >> 2 || rep.length < (OutputInfoExtra >> 2)) ++ { ++ if (rep.length > (OutputInfoExtra >> 2)) ++ _XEatDataWords (dpy, rep.length - (OutputInfoExtra >> 2)); ++ else ++ _XEatDataWords (dpy, rep.length); ++ UnlockDisplay (dpy); ++ SyncHandle (); ++ return NULL; ++ } + nbytes = ((long) (rep.length) << 2) - OutputInfoExtra; + + nbytesRead = (long) (rep.nCrtcs * 4 + +diff --git a/src/XrrProvider.c b/src/XrrProvider.c +index 9e620c7..d796cd0 100644 +--- a/src/XrrProvider.c ++++ b/src/XrrProvider.c +@@ -25,6 +25,7 @@ + #include <config.h> + #endif + ++#include <limits.h> + #include <stdio.h> + #include <X11/Xlib.h> + /* we need to be able to manipulate the Display structure on events */ +@@ -59,12 +60,20 @@ XRRGetProviderResources(Display *dpy, Window window) + return NULL; + } + +- nbytes = (long) rep.length << 2; ++ if (rep.length < INT_MAX >> 2) { ++ nbytes = (long) rep.length << 2; + +- nbytesRead = (long) (rep.nProviders * 4); ++ nbytesRead = (long) (rep.nProviders * 4); + +- rbytes = (sizeof(XRRProviderResources) + rep.nProviders * sizeof(RRProvider)); +- xrpr = (XRRProviderResources *) Xmalloc(rbytes); ++ rbytes = (sizeof(XRRProviderResources) + rep.nProviders * ++ sizeof(RRProvider)); ++ xrpr = (XRRProviderResources *) Xmalloc(rbytes); ++ } else { ++ nbytes = 0; ++ nbytesRead = 0; ++ rbytes = 0; ++ xrpr = NULL; ++ } + + if (xrpr == NULL) { + _XEatDataWords (dpy, rep.length); +@@ -121,6 +130,17 @@ XRRGetProviderInfo(Display *dpy, XRRScreenResources *resources, RRProvider provi + return NULL; + } + ++ if (rep.length > INT_MAX >> 2 || rep.length < ProviderInfoExtra >> 2) ++ { ++ if (rep.length < ProviderInfoExtra >> 2) ++ _XEatDataWords (dpy, rep.length); ++ else ++ _XEatDataWords (dpy, rep.length - (ProviderInfoExtra >> 2)); ++ UnlockDisplay (dpy); ++ SyncHandle (); ++ return NULL; ++ } ++ + nbytes = ((long) rep.length << 2) - ProviderInfoExtra; + + nbytesRead = (long)(rep.nCrtcs * 4 + +diff --git a/src/XrrScreen.c b/src/XrrScreen.c +index b8ce7e5..1f7ffe6 100644 +--- a/src/XrrScreen.c ++++ b/src/XrrScreen.c +@@ -24,6 +24,7 @@ + #include <config.h> + #endif + ++#include <limits.h> + #include <stdio.h> + #include <X11/Xlib.h> + /* we need to be able to manipulate the Display structure on events */ +@@ -105,27 +106,36 @@ doGetScreenResources (Display *dpy, Window window, int poll) + xrri->has_rates = _XRRHasRates (xrri->minor_version, xrri->major_version); + } + +- 
nbytes = (long) rep.length << 2; ++ if (rep.length < INT_MAX >> 2) { ++ nbytes = (long) rep.length << 2; + +- nbytesRead = (long) (rep.nCrtcs * 4 + +- rep.nOutputs * 4 + +- rep.nModes * SIZEOF (xRRModeInfo) + +- ((rep.nbytesNames + 3) & ~3)); ++ nbytesRead = (long) (rep.nCrtcs * 4 + ++ rep.nOutputs * 4 + ++ rep.nModes * SIZEOF (xRRModeInfo) + ++ ((rep.nbytesNames + 3) & ~3)); + +- /* +- * first we must compute how much space to allocate for +- * randr library's use; we'll allocate the structures in a single +- * allocation, on cleanlyness grounds. +- */ ++ /* ++ * first we must compute how much space to allocate for ++ * randr library's use; we'll allocate the structures in a single ++ * allocation, on cleanlyness grounds. ++ */ ++ ++ rbytes = (sizeof (XRRScreenResources) + ++ rep.nCrtcs * sizeof (RRCrtc) + ++ rep.nOutputs * sizeof (RROutput) + ++ rep.nModes * sizeof (XRRModeInfo) + ++ rep.nbytesNames + rep.nModes); /* '\0' terminate names */ + +- rbytes = (sizeof (XRRScreenResources) + +- rep.nCrtcs * sizeof (RRCrtc) + +- rep.nOutputs * sizeof (RROutput) + +- rep.nModes * sizeof (XRRModeInfo) + +- rep.nbytesNames + rep.nModes); /* '\0' terminate names */ ++ xrsr = (XRRScreenResources *) Xmalloc(rbytes); ++ wire_names = (char *) Xmalloc (rep.nbytesNames); ++ } else { ++ nbytes = 0; ++ nbytesRead = 0; ++ rbytes = 0; ++ xrsr = NULL; ++ wire_names = NULL; ++ } + +- xrsr = (XRRScreenResources *) Xmalloc(rbytes); +- wire_names = (char *) Xmalloc (rep.nbytesNames); + if (xrsr == NULL || wire_names == NULL) { + Xfree (xrsr); + Xfree (wire_names); +@@ -174,6 +184,14 @@ doGetScreenResources (Display *dpy, Window window, int poll) + wire_name = wire_names; + for (i = 0; i < rep.nModes; i++) { + xrsr->modes[i].name = names; ++ if (xrsr->modes[i].nameLength > rep.nbytesNames) { ++ Xfree (xrsr); ++ Xfree (wire_names); ++ UnlockDisplay (dpy); ++ SyncHandle (); ++ return NULL; ++ } ++ rep.nbytesNames -= xrsr->modes[i].nameLength; + memcpy (names, wire_name, xrsr->modes[i].nameLength); + names[xrsr->modes[i].nameLength] = '\0'; + names += xrsr->modes[i].nameLength + 1; +-- +cgit v0.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrandr_1.5.0.bb b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrandr_1.5.0.bb index abbbae552..35c60b419 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrandr_1.5.0.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrandr_1.5.0.bb @@ -19,5 +19,8 @@ XORG_PN = "libXrandr" BBCLASSEXTEND = "native nativesdk" +SRC_URI += "file://CVE-2016-7947_CVE-2016-7948.patch \ + " + SRC_URI[md5sum] = "309762867e41c6fd813da880d8a1bc93" SRC_URI[sha256sum] = "6f864959b7fc35db11754b270d71106ef5b5cf363426aa58589cb8ac8266de58" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrender/CVE-2016-7949.patch b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrender/CVE-2016-7949.patch new file mode 100644 index 000000000..73315b108 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrender/CVE-2016-7949.patch @@ -0,0 +1,59 @@ +From 9362c7ddd1af3b168953d0737877bc52d79c94f4 Mon Sep 17 00:00:00 2001 +From: Tobias Stoeckmann <tobias@stoeckmann.org> +Date: Sun, 25 Sep 2016 21:43:09 +0200 +Subject: Validate lengths while parsing server data. + +Individual lengths inside received server data can overflow +the previously reserved memory. 
+ +It is therefore important to validate every single length +field to not overflow the previously agreed sum of all invidual +length fields. + +v2: consume remaining bytes in the reply buffer on error. + +CVE: CVE-2016-7949 +Upstream-Status: Backport + +Signed-off-by: Tobias Stoeckmann <tobias@stoeckmann.org> +Reviewed-by: Matthieu Herrb@laas.fr +Signed-off-by: Sona Sarmadi <sona.sarmadi@enea.com> + +diff --git a/src/Xrender.c b/src/Xrender.c +index 3102eb2..71cf3e6 100644 +--- a/src/Xrender.c ++++ b/src/Xrender.c +@@ -533,12 +533,30 @@ XRenderQueryFormats (Display *dpy) + screen->fallback = _XRenderFindFormat (xri, xScreen->fallback); + screen->subpixel = SubPixelUnknown; + xDepth = (xPictDepth *) (xScreen + 1); ++ if (screen->ndepths > rep.numDepths) { ++ Xfree (xri); ++ Xfree (xData); ++ _XEatDataWords (dpy, rep.length); ++ UnlockDisplay (dpy); ++ SyncHandle (); ++ return 0; ++ } ++ rep.numDepths -= screen->ndepths; + for (nd = 0; nd < screen->ndepths; nd++) + { + depth->depth = xDepth->depth; + depth->nvisuals = xDepth->nPictVisuals; + depth->visuals = visual; + xVisual = (xPictVisual *) (xDepth + 1); ++ if (depth->nvisuals > rep.numVisuals) { ++ Xfree (xri); ++ Xfree (xData); ++ _XEatDataWords (dpy, rep.length); ++ UnlockDisplay (dpy); ++ SyncHandle (); ++ return 0; ++ } ++ rep.numVisuals -= depth->nvisuals; + for (nv = 0; nv < depth->nvisuals; nv++) + { + visual->visual = _XRenderFindVisual (dpy, xVisual->visual); +-- +cgit v0.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrender_0.9.9.bb b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrender_0.9.9.bb index 44cb2e0eb..eac367906 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrender_0.9.9.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxrender_0.9.9.bb @@ -19,5 +19,8 @@ XORG_PN = "libXrender" BBCLASSEXTEND = "native nativesdk" +SRC_URI += "file://CVE-2016-7949.patch \ + " + SRC_URI[md5sum] = "5db92962b124ca3a8147daae4adbd622" SRC_URI[sha256sum] = "fc2fe57980a14092426dffcd1f2d9de0987b9d40adea663bd70d6342c0e9be1a" diff --git a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxtst_1.2.2.bb b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxtst_1.2.3.bb index 1b0bcf391..31ea4392d 100644 --- a/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxtst_1.2.2.bb +++ b/import-layers/yocto-poky/meta/recipes-graphics/xorg-lib/libxtst_1.2.3.bb @@ -16,5 +16,5 @@ PE = "1" XORG_PN = "libXtst" -SRC_URI[md5sum] = "25c6b366ac3dc7a12c5d79816ce96a59" -SRC_URI[sha256sum] = "ef0a7ffd577e5f1a25b1663b375679529663a1880151beaa73e9186c8309f6d9" +SRC_URI[md5sum] = "ef8c2c1d16a00bd95b9fdcef63b8a2ca" +SRC_URI[sha256sum] = "4655498a1b8e844e3d6f21f3b2c4e2b571effb5fd83199d428a6ba7ea4bf5204" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/cryptodev/cryptodev.inc b/import-layers/yocto-poky/meta/recipes-kernel/cryptodev/cryptodev.inc index 160ab3084..22c6977ef 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/cryptodev/cryptodev.inc +++ b/import-layers/yocto-poky/meta/recipes-kernel/cryptodev/cryptodev.inc @@ -3,7 +3,7 @@ HOMEPAGE = "http://cryptodev-linux.org/" LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" -SRC_URI = "http://download.gna.org/cryptodev-linux/cryptodev-linux-${PV}.tar.gz \ +SRC_URI = "http://nwl.cc/pub/cryptodev-linux/cryptodev-linux-${PV}.tar.gz \ file://06d6b560c6e45dc317dae47c74706fa43f4a31d8.patch \ file://cb186f682679383e8b5806240927903730ce85d9.patch" diff --git 
a/import-layers/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb b/import-layers/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb index d8041ddd3..8af01559d 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb @@ -4,7 +4,7 @@ LIC_FILES_CHKSUM = "file://git/tools/kgit;beginline=5;endline=9;md5=a6c2fa8aef1b DEPENDS = "git-native" -SRCREV = "9a3995ee8daabf37e92e1b51b133cf8582d85809" +SRCREV = "85564e69555b713c2759d58ec5ade54424d051d8" PR = "r12" PV = "0.2+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-dtb.inc b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-dtb.inc index 8528d646d..2f378a1db 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-dtb.inc +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-dtb.inc @@ -68,7 +68,7 @@ pkg_postinst_kernel-devicetree () { for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do symlink_name=${type}"-"${KERNEL_IMAGE_SYMLINK_NAME} DTB_EXT=${DTB##*.} - DTB_BASE_NAME=`basename ${DTB} | awk -F "." '{print $1}'` + DTB_BASE_NAME=`basename ${DTB} ."${DTB_EXT}"` DTB_SYMLINK_NAME=`echo ${symlink_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"` update-alternatives --install /${KERNEL_IMAGEDEST}/${DTB_BASE_NAME}.${DTB_EXT} ${DTB_BASE_NAME}.${DTB_EXT} /boot/devicetree-${DTB_SYMLINK_NAME}.${DTB_EXT} ${KERNEL_PRIORITY} || true done @@ -81,7 +81,7 @@ pkg_postrm_kernel-devicetree () { for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do symlink_name=${type}"-"${KERNEL_IMAGE_SYMLINK_NAME} DTB_EXT=${DTB##*.} - DTB_BASE_NAME=`basename ${DTB} | awk -F "." '{print $1}'` + DTB_BASE_NAME=`basename ${DTB} ."${DTB_EXT}"` DTB_SYMLINK_NAME=`echo ${symlink_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"` update-alternatives --remove ${DTB_BASE_NAME}.${DTB_EXT} /boot/devicetree-${DTB_SYMLINK_NAME}.${DTB_EXT} ${KERNEL_PRIORITY} || true done diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-dev.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-dev.bb index 9154bb7c3..0cda553af 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-dev.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-dev.bb @@ -28,7 +28,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto-dev.git;branch=${KBRANCH};name SRCREV_machine ?= '${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "${AUTOREV}", "29594404d7fe73cd80eaa4ee8c43dcc53970c60e", d)}' SRCREV_meta ?= '${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "${AUTOREV}", "29594404d7fe73cd80eaa4ee8c43dcc53970c60e", d)}' -LINUX_VERSION ?= "4.8-rc+" +LINUX_VERSION ?= "4.9-rc+" LINUX_VERSION_EXTENSION ?= "-yoctodev-${LINUX_KERNEL_TYPE}" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb index b95fb5857..0e413f006 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.1.bb @@ -11,13 +11,13 @@ python () { raise bb.parse.SkipPackage("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") } -SRCREV_machine ?= "966ddde490030166010c5770f8f86cdd0e961c76" -SRCREV_meta ?= "3c3197e65b6f2f5514853c1fe78ae8ffc131b02c" +SRCREV_machine ?= 
"f793c71d51277bdb14d6854c8cbcc09b4f5936b4" +SRCREV_meta ?= "7140ddb86e4b01529185e6d4a606001ad152b8f3" SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.1.git;branch=${KBRANCH};name=machine \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.1;destsuffix=${KMETA}" -LINUX_VERSION ?= "4.1.33" +LINUX_VERSION ?= "4.1.38" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.4.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.4.bb index 6c1138277..1f73f0c75 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.4.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.4.bb @@ -11,13 +11,13 @@ python () { raise bb.parse.SkipPackage("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") } -SRCREV_machine ?= "652b564985db555b549ef73405aea6c38919eefc" -SRCREV_meta ?= "3030330b066a33ce21164a8b30d0503cf9f68e5b" +SRCREV_machine ?= "1af95315c6ab3672c8c7191873ef041a6c29ad70" +SRCREV_meta ?= "d6733af2080f8c0775569adc0826eb0c8954fc5e" SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.4.git;branch=${KBRANCH};name=machine \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.4;destsuffix=${KMETA}" -LINUX_VERSION ?= "4.4.26" +LINUX_VERSION ?= "4.4.60" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.8.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.8.bb index e51c9cdcc..e65bc18e6 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.8.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-rt_4.8.bb @@ -11,13 +11,13 @@ python () { raise bb.parse.SkipPackage("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") } -SRCREV_machine ?= "4057556c041f6aac0d29aa3425587d414c9a0090" -SRCREV_meta ?= "83110d94edeb856a3667b62903ed4ae91c24117d" +SRCREV_machine ?= "7f56c6365d0d677d5fe6ca73f1b2ab6640b523a3" +SRCREV_meta ?= "c84532b6475fd78b878507a481e2c04714341c07" SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.8.git;branch=${KBRANCH};name=machine \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.8;destsuffix=${KMETA}" -LINUX_VERSION ?= "4.8.3" +LINUX_VERSION ?= "4.8.24" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb index ba01702cb..ce8aea628 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.1.bb @@ -4,13 +4,13 @@ KCONFIG_MODE = "--allnoconfig" require recipes-kernel/linux/linux-yocto.inc -LINUX_VERSION ?= "4.1.33" +LINUX_VERSION ?= "4.1.38" KMETA = "kernel-meta" KCONF_BSP_AUDIT_LEVEL = "2" -SRCREV_machine ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_meta ?= "3c3197e65b6f2f5514853c1fe78ae8ffc131b02c" +SRCREV_machine ?= "2ce56d130ddff67f43ec857cc51cd347666a0078" +SRCREV_meta ?= "7140ddb86e4b01529185e6d4a606001ad152b8f3" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.4.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.4.bb index 76c41639c..04f719c5d 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.4.bb +++ 
b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.4.bb @@ -4,13 +4,13 @@ KCONFIG_MODE = "--allnoconfig" require recipes-kernel/linux/linux-yocto.inc -LINUX_VERSION ?= "4.4.26" +LINUX_VERSION ?= "4.4.60" KMETA = "kernel-meta" KCONF_BSP_AUDIT_LEVEL = "2" -SRCREV_machine ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_meta ?= "3030330b066a33ce21164a8b30d0503cf9f68e5b" +SRCREV_machine ?= "0298d3765a5c474ff5776284d49111276510d4b4" +SRCREV_meta ?= "d6733af2080f8c0775569adc0826eb0c8954fc5e" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.8.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.8.bb index c8ddbd93d..a0aa085be 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.8.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto-tiny_4.8.bb @@ -4,13 +4,13 @@ KCONFIG_MODE = "--allnoconfig" require recipes-kernel/linux/linux-yocto.inc -LINUX_VERSION ?= "4.8.3" +LINUX_VERSION ?= "4.8.24" KMETA = "kernel-meta" KCONF_BSP_AUDIT_LEVEL = "2" -SRCREV_machine ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_meta ?= "83110d94edeb856a3667b62903ed4ae91c24117d" +SRCREV_machine ?= "6a134d2553b9c25ce8acb67b807fe0feb1b01430" +SRCREV_meta ?= "c84532b6475fd78b878507a481e2c04714341c07" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb index 788a8eaaa..bf7f266ee 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.1.bb @@ -11,20 +11,20 @@ KBRANCH_qemux86 ?= "standard/base" KBRANCH_qemux86-64 ?= "standard/base" KBRANCH_qemumips64 ?= "standard/mti-malta64" -SRCREV_machine_qemuarm ?= "d67ef485ce1420df11bda2d9f6fb78ef50c1adff" -SRCREV_machine_qemuarm64 ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_machine_qemumips ?= "65116339cfd210990c9c4710cdfec3ebd59abb0e" -SRCREV_machine_qemuppc ?= "30816907653b57f1f3d5f9a7a2f6339bab14a680" -SRCREV_machine_qemux86 ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_machine_qemux86-64 ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_machine_qemumips64 ?= "f7a0b532b6ac81757d85b0c9a928f45a87c9e364" -SRCREV_machine ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae" -SRCREV_meta ?= "3c3197e65b6f2f5514853c1fe78ae8ffc131b02c" +SRCREV_machine_qemuarm ?= "4d2c95e78cdc7d312b7ab231ce90dce317f45df9" +SRCREV_machine_qemuarm64 ?= "2ce56d130ddff67f43ec857cc51cd347666a0078" +SRCREV_machine_qemumips ?= "81454f95166056a253c8950980e025ee243d8074" +SRCREV_machine_qemuppc ?= "42c41e606b70fd73a202f4146c0480f5624b0a0e" +SRCREV_machine_qemux86 ?= "2ce56d130ddff67f43ec857cc51cd347666a0078" +SRCREV_machine_qemux86-64 ?= "2ce56d130ddff67f43ec857cc51cd347666a0078" +SRCREV_machine_qemumips64 ?= "8a481005da41f82d2a40bf8cb40334547160ab5b" +SRCREV_machine ?= "2ce56d130ddff67f43ec857cc51cd347666a0078" +SRCREV_meta ?= "7140ddb86e4b01529185e6d4a606001ad152b8f3" SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.1.git;name=machine;branch=${KBRANCH}; \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.1;destsuffix=${KMETA}" -LINUX_VERSION ?= "4.1.33" +LINUX_VERSION ?= "4.1.38" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.4.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.4.bb 
index e3a3d901d..d0eed6559 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.4.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.4.bb @@ -11,20 +11,20 @@ KBRANCH_qemux86 ?= "standard/base" KBRANCH_qemux86-64 ?= "standard/base" KBRANCH_qemumips64 ?= "standard/mti-malta64" -SRCREV_machine_qemuarm ?= "187bcc13f3023c3ae0a3ba5c69ae85c4e5e693ac" -SRCREV_machine_qemuarm64 ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_qemumips ?= "2f273556495dd2871f08c73fc3f40d1ad546c638" -SRCREV_machine_qemuppc ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_qemux86 ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_qemux86-64 ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_machine_qemumips64 ?= "0a19cacf5738876666a4b530a9fa14f05b355299" -SRCREV_machine ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473" -SRCREV_meta ?= "3030330b066a33ce21164a8b30d0503cf9f68e5b" +SRCREV_machine_qemuarm ?= "7d2a3c70d62f1e7f4eba571c49ff299db2bb3829" +SRCREV_machine_qemuarm64 ?= "0298d3765a5c474ff5776284d49111276510d4b4" +SRCREV_machine_qemumips ?= "6100965a51cf6b99f57cf8234aa982beb79455c9" +SRCREV_machine_qemuppc ?= "0298d3765a5c474ff5776284d49111276510d4b4" +SRCREV_machine_qemux86 ?= "0298d3765a5c474ff5776284d49111276510d4b4" +SRCREV_machine_qemux86-64 ?= "0298d3765a5c474ff5776284d49111276510d4b4" +SRCREV_machine_qemumips64 ?= "522e709fd7088e1a55e7a4708b1a07caa2ca4336" +SRCREV_machine ?= "0298d3765a5c474ff5776284d49111276510d4b4" +SRCREV_meta ?= "d6733af2080f8c0775569adc0826eb0c8954fc5e" SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.4.git;name=machine;branch=${KBRANCH}; \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.4;destsuffix=${KMETA}" -LINUX_VERSION ?= "4.4.26" +LINUX_VERSION ?= "4.4.60" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb index 13778b9c4..bfd52d24e 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/linux/linux-yocto_4.8.bb @@ -11,20 +11,20 @@ KBRANCH_qemux86 ?= "standard/base" KBRANCH_qemux86-64 ?= "standard/base" KBRANCH_qemumips64 ?= "standard/mti-malta64" -SRCREV_machine_qemuarm ?= "4cc544ad09ad704322cb66fe4ba197a6a05dc71f" -SRCREV_machine_qemuarm64 ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_qemumips ?= "c285969d4f9376a671167ecf397578c8ad3e6a75" -SRCREV_machine_qemuppc ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_qemux86 ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_qemux86-64 ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_machine_qemumips64 ?= "64f96ba530e58456070f26b0f3fcce3f64988b72" -SRCREV_machine ?= "1adf9d36338dc3c63cdbf6f98bcbdc7bba42a794" -SRCREV_meta ?= "83110d94edeb856a3667b62903ed4ae91c24117d" +SRCREV_machine_qemuarm ?= "f25e3a184bf0ac7b12ec9c98d71439f4ac911974" +SRCREV_machine_qemuarm64 ?= "b9c5f19c82c717b014eab5dc404b9489badbfc8f" +SRCREV_machine_qemumips ?= "79e11192ca2c1acc714214c2125a8c0296c00413" +SRCREV_machine_qemuppc ?= "7a688297cc810a614f0329371d1389e550a98504" +SRCREV_machine_qemux86 ?= "f6329fd2875778192c03e08be02730180cb0dc71" +SRCREV_machine_qemux86-64 ?= "f6329fd2875778192c03e08be02730180cb0dc71" +SRCREV_machine_qemumips64 ?= "d619311dd8ea9ee95d80d937f08fb2c70c1dc50c" +SRCREV_machine ?= "f6329fd2875778192c03e08be02730180cb0dc71" +SRCREV_meta ?= 
"c84532b6475fd78b878507a481e2c04714341c07" SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.8.git;name=machine;branch=${KBRANCH}; \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.8;destsuffix=${KMETA}" -LINUX_VERSION ?= "4.8.3" +LINUX_VERSION ?= "4.8.24" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_git.bb b/import-layers/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_git.bb index bfc657b3a..b00ce8d2e 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_git.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/lttng/lttng-tools_git.bb @@ -113,4 +113,7 @@ do_install_ptest () { # checkpatch.pl is unneeded on target and causes file-rdeps QA # warnings. rm -f ${D}${PTEST_PATH}/extras/checkpatch.pl + + # Remove built libraries as they confuse the packages' runtime dependency resolution + rm -rf ${D}${PTEST_PATH}/src/lib/lttng-ctl/.libs/ } diff --git a/import-layers/yocto-poky/meta/recipes-kernel/perf/perf.bb b/import-layers/yocto-poky/meta/recipes-kernel/perf/perf.bb index 471023d07..03ae4464e 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/perf/perf.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/perf/perf.bb @@ -66,6 +66,7 @@ SCRIPTING_DEFINES = "${@perf_feature_enabled('perf-scripting', '', 'NO_LIBPERL=1 TUI_DEFINES = "${@perf_feature_enabled('perf-tui', '', 'NO_NEWT=1',d)}" LIBUNWIND_DEFINES = "${@perf_feature_enabled('perf-libunwind', '', 'NO_LIBUNWIND=1 NO_LIBDW_DWARF_UNWIND=1',d)}" LIBNUMA_DEFINES = "${@perf_feature_enabled('perf-libnuma', '', 'NO_LIBNUMA=1',d)}" +SYSTEMTAP_DEFINES = "${@perf_feature_enabled('perf-systemtap', '', 'NO_SDT=1', d)}" # The LDFLAGS is required or some old kernels fails due missing # symbols and this is preferred than requiring patches to every old @@ -83,7 +84,7 @@ EXTRA_OEMAKE = '\ EXTRA_CFLAGS="-ldw" \ perfexecdir=${libexecdir} \ NO_GTK2=1 ${TUI_DEFINES} NO_DWARF=1 ${LIBUNWIND_DEFINES} \ - ${SCRIPTING_DEFINES} ${LIBNUMA_DEFINES} \ + ${SCRIPTING_DEFINES} ${LIBNUMA_DEFINES} ${SYSTEMTAP_DEFINES} \ ' EXTRA_OEMAKE += "\ diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-Do-not-let-configure-write-a-python-location-into-th.patch b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-Do-not-let-configure-write-a-python-location-into-th.patch new file mode 100644 index 000000000..742b1187f --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-Do-not-let-configure-write-a-python-location-into-th.patch @@ -0,0 +1,25 @@ +From ab29615ed6c2e779b472903564dc683dc1015de7 Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin <alex.kanavin@gmail.com> +Date: Wed, 22 Feb 2017 13:37:33 +0200 +Subject: [PATCH] Do not let configure write a python location into the dtrace + binary + +Upstream-Status: Inappropriate [oe-core specific] +Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com> +--- + dtrace.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/dtrace.in b/dtrace.in +index 5e1cf8079..a24229cbc 100644 +--- a/dtrace.in ++++ b/dtrace.in +@@ -1,4 +1,4 @@ +-#!@preferred_python@ ++#!/usr/bin/python3 + # vim: et sta sts=4 sw=4 ts=8 + + # This handles the systemtap equivalent of +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-Install-python-modules-to-correct-library-dir.patch 
b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-Install-python-modules-to-correct-library-dir.patch new file mode 100644 index 000000000..528864cc9 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-Install-python-modules-to-correct-library-dir.patch @@ -0,0 +1,36 @@ +From 2ada22f05460223924efe54080cb4419e2b4c276 Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin <alex.kanavin@gmail.com> +Date: Fri, 24 Feb 2017 17:53:02 +0200 +Subject: [PATCH] Install python modules to correct library dir. + +Upstream-Status: Inappropriate [oe-core specific] +Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com> +--- + python/Makefile.am | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/python/Makefile.am b/python/Makefile.am +index a254480f9..efe9f3c01 100644 +--- a/python/Makefile.am ++++ b/python/Makefile.am +@@ -47,7 +47,7 @@ install-exec-local: + if HAVE_PYTHON2_PROBES + (cd $(srcdir); CFLAGS="$(AM_CPPFLAGS)" $(PYTHON) setup.py build \ + --build-base $(shell readlink -f $(builddir))/py2build \ +- install --prefix $(DESTDIR)$(prefix) \ ++ install --prefix $(DESTDIR)$(prefix) --install-lib=$(DESTDIR)${pythondir} \ + --single-version-externally-managed \ + --record $(shell readlink -f $(builddir))/py2build/install_files.txt \ + --verbose) +@@ -55,7 +55,7 @@ endif + if HAVE_PYTHON3_PROBES + (cd $(srcdir); CFLAGS="$(AM_CPPFLAGS)" $(PYTHON3) setup.py build \ + --build-base $(shell readlink -f $(builddir))/py3build \ +- install --prefix $(DESTDIR)$(prefix) \ ++ install --prefix $(DESTDIR)$(prefix) --install-lib=$(DESTDIR)${python3dir} \ + --single-version-externally-managed \ + --record $(shell readlink -f $(builddir))/py3build/install_files.txt \ + --verbose) +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-buildrun-remove-quotes-around-I-include-line.patch b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-buildrun-remove-quotes-around-I-include-line.patch new file mode 100644 index 000000000..7996fdde7 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/0001-buildrun-remove-quotes-around-I-include-line.patch @@ -0,0 +1,38 @@ +From 75c4aec6de3a615909f3283eac585760de101b8c Mon Sep 17 00:00:00 2001 +From: Saul Wold <sgw@linux.intel.com> +Date: Tue, 7 Mar 2017 10:46:12 -0800 +Subject: [PATCH] buildrun: remove quotes around -I include line + +By having the quotes, the kernel Makefile addtree macro adds the +kernel $srctree directory as a prefix and causes compilation failures. +Removing the quotes resolves the issue. 
+ +This is trimmed from the verbose output of the GCC command line +Before: + -I/srv/sdb/builds/4.9/tmp/work-shared/qemux86-64/kernel-source/"/srv/sdb/releases/jethro/builds/4.1/tmp/sysroots/x86_64-linux/usr/share/systemtap/runtime" + +After: + -I/srv/sdb/builds/4.9/tmp/sysroots/x86_64-linux/usr/share/systemtap/runtime + +Upstream-Status: Pending +Signed-off-by: Saul Wold <sgw@linux.intel.com> +--- + buildrun.cxx | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/buildrun.cxx b/buildrun.cxx +index aaea64c..8a8ee9f 100644 +--- a/buildrun.cxx ++++ b/buildrun.cxx +@@ -495,7 +495,7 @@ compile_pass (systemtap_session& s) + #if CHECK_POINTER_ARITH_PR5947 + o << "EXTRA_CFLAGS += -Wpointer-arith" << endl; + #endif +- o << "EXTRA_CFLAGS += -I\"" << s.runtime_path << "\"" << endl; ++ o << "EXTRA_CFLAGS += -I" << s.runtime_path << endl; + // XXX: this may help ppc toc overflow + // o << "CFLAGS := $(subst -Os,-O2,$(CFLAGS)) -fminimal-toc" << endl; + o << "obj-m := " << s.module_name << ".o" << endl; +-- +2.7.4 + diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/monitor-option.patch b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/monitor-option.patch index 7d43a79e2..b7ee0701b 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/monitor-option.patch +++ b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/monitor-option.patch @@ -1,21 +1,30 @@ +From 93fc4744fedf6fc593ee656968da97f7b1862ada Mon Sep 17 00:00:00 2001 +From: Ross Burton <ross.burton@intel.com> +Date: Tue, 4 Oct 2016 16:37:53 +0100 +Subject: [PATCH 4/6] systemtap: rationalise dependencies + Add an option to explicitly disable the monitor (and therefore the dependency on json-c and ncurses). Upstream-Status: Pending Signed-off-by: Ross Burton <ross.burton@intel.com> +--- + configure.ac | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + diff --git a/configure.ac b/configure.ac -index cd781a2..e56079a 100644 +index 6bd0c5fc4..2ea9b3cbf 100644 --- a/configure.ac +++ b/configure.ac -@@ -570,13 +574,16 @@ dnl See if we have enough libraries and tools to build the virt server - fi - AM_CONDITIONAL([BUILD_VIRT], [test "${have_libvirt}" == "yes" -a "${have_libxml2}" == "yes" -a "$enable_virt" != "no"]) - +@@ -752,13 +752,16 @@ dnl We want either (or both) python probe support. + AM_CONDITIONAL([HAVE_PYTHON_PROBES], + [test "x$have_python2_support" = "xyes" -o "x$have_python3_support" = "xyes"]) + +AC_ARG_ENABLE([monitor], AS_HELP_STRING([--disable-monitor],[Disable monitor])) +if test "$enable_monitor" != "no"; then dnl Check for presence of json-c and ncurses for use in monitor mode - PKG_CHECK_MODULES([jsonc], [json-c], [have_jsonc=yes], [have_jsonc=no]) + PKG_CHECK_MODULES([jsonc], [json-c >= 0.12], [have_jsonc=yes], [have_jsonc=no]) PKG_CHECK_MODULES([ncurses], [ncurses], [have_ncurses=yes], [have_ncurses=no]) -AM_CONDITIONAL([HAVE_MONITOR_LIBS], [test "${have_jsonc}" == "yes" -a "${have_ncurses}" == "yes"]) if test "${have_jsonc}" == "yes" -a "${have_ncurses}" == yes; then @@ -23,6 +32,9 @@ index cd781a2..e56079a 100644 fi +fi +AM_CONDITIONAL([HAVE_MONITOR_LIBS], [test "${have_jsonc}" == "yes" -a "${have_ncurses}" == "yes" -a "$enable_monitor" != "no"]) - + AC_CACHE_CHECK([for assembler .section "?" 
flags support], stap_cv_sectionq, [ old_CFLAGS="$CFLAGS" +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/no-msgfmt-check.patch b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/no-msgfmt-check.patch new file mode 100644 index 000000000..2c860b19e --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/no-msgfmt-check.patch @@ -0,0 +1,33 @@ +From 43f1b04449bb1cf7e0092263f1c2a25f3fca08ef Mon Sep 17 00:00:00 2001 +From: Ross Burton <ross.burton@intel.com> +Date: Tue, 8 Nov 2016 23:07:41 +0000 +Subject: [PATCH 5/6] systemtap: remove explicit msgfmt check + +There is no need to explicitly check that msgfmt was found as the gettext macros +handle this for us if NLS is enabled. + +Upstream-Status: Pending +Signed-off-by: Ross Burton <ross.burton@intel.com> + +--- + configure.ac | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 2ea9b3cbf..95417f59c 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -36,10 +36,6 @@ AC_CHECK_FUNCS(openat) + AM_GNU_GETTEXT(external) + AM_GNU_GETTEXT_VERSION([0.19.4]) + +-if test "x$GMSGFMT" = "x:"; then +- AC_MSG_ERROR([missing gnu /usr/bin/msgfmt]) +-fi +- + # We want the 'PYTHON' varible to be python version 2. We also want + # our custom 'PYTHON3' varible to be python version 3. + # +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/obsolete_automake_macros.patch b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/obsolete_automake_macros.patch deleted file mode 100644 index 988cda4f0..000000000 --- a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap/obsolete_automake_macros.patch +++ /dev/null @@ -1,15 +0,0 @@ -Upstream-Status: Pending - -Signed-off-by: Marko Lindqvist <cazfi74@gmail.com> -Index: git/configure.ac -=================================================================== ---- git.orig/configure.ac -+++ git/configure.ac -@@ -19,7 +19,6 @@ AC_PROG_LN_S - AC_PROG_CC - AC_PROG_CXX - AC_PROG_CPP --AM_PROG_CC_STDC - AM_PROG_CC_C_O - AC_PROG_RANLIB - AC_OBJEXT diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.bb b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.bb index 43bf69e87..fed368a38 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.bb +++ b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.bb @@ -4,12 +4,12 @@ require systemtap_git.inc DEPENDS = "boost elfutils" -RDEPENDS_${PN} += "python3-core bash" +RDEPENDS_${PN} += "python3-core bash perl" EXTRA_OECONF += "--with-libelf=${STAGING_DIR_TARGET} --without-rpm \ --without-nss --without-avahi --without-dyninst \ --disable-server --disable-grapher --enable-prologues \ - --with-python3 \ + --with-python3 --without-python2-probes \ ac_cv_prog_have_javac=no \ ac_cv_prog_have_jar=no " @@ -17,11 +17,12 @@ STAP_DOCS ?= "--disable-docs --disable-publican --disable-refdocs" EXTRA_OECONF += "${STAP_DOCS} " -PACKAGECONFIG ??= "sqlite monitor" +PACKAGECONFIG ??= "sqlite monitor python3-probes" PACKAGECONFIG[libvirt] = "--enable-libvirt,--disable-libvirt,libvirt" PACKAGECONFIG[sqlite] = "--enable-sqlite,--disable-sqlite,sqlite3" PACKAGECONFIG[monitor] = "--enable-monitor,--disable-monitor,ncurses json-c" +PACKAGECONFIG[python3-probes] = "--with-python3-probes,--without-python3-probes,python3-setuptools-native" -inherit autotools gettext pkgconfig +inherit autotools gettext pkgconfig distutils3-base BBCLASSEXTEND = 
"native nativesdk" diff --git a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.inc b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.inc index 304eb9975..a6aedd38a 100644 --- a/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.inc +++ b/import-layers/yocto-poky/meta/recipes-kernel/systemtap/systemtap_git.inc @@ -1,14 +1,17 @@ LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" -SRCREV = "616ec7a0b916df7785d911b824c3df6eb022b213" -PV = "3.0" +SRCREV = "b8ea350dc13adb6190d9044a5b80110a4c441270" +PV = "3.1" SRC_URI = "git://sourceware.org/git/systemtap.git \ - file://obsolete_automake_macros.patch \ file://system_map_location.patch \ file://configure-allow-to-disable-libvirt.patch \ file://x32_abi_time.patch \ file://monitor-option.patch \ + file://no-msgfmt-check.patch \ + file://0001-Do-not-let-configure-write-a-python-location-into-th.patch \ + file://0001-Install-python-modules-to-correct-library-dir.patch \ + file://0001-buildrun-remove-quotes-around-I-include-line.patch \ " # systemtap doesn't support mips diff --git a/import-layers/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.56.bb b/import-layers/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.56.bb index 10c350ac2..277d69ff0 100644 --- a/import-layers/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.56.bb +++ b/import-layers/yocto-poky/meta/recipes-lsb4/libpng/libpng12_1.2.56.bb @@ -9,7 +9,11 @@ DEPENDS = "zlib" PN = "libpng12" S = "${WORKDIR}/libpng-${PV}" -SRC_URI = "${GENTOO_MIRROR}/libpng-${PV}.tar.xz" +LIBV = "12" + +SRC_URI = "${SOURCEFORGE_MIRROR}/project/libpng/libpng${LIBV}/${PV}/libpng-${PV}.tar.xz" + +MIRRORS += "${SOURCEFORGE_MIRROR}/project/libpng/libpng${LIBV}/${PV}/ ${SOURCEFORGE_MIRROR}/project/libpng/libpng${LIBV}/older-releases/${PV}" SRC_URI[md5sum] = "868562bd1c58b76ed8703f135a2e439a" SRC_URI[sha256sum] = "24ce54581468b937734a6ecc86f7e121bc46a90d76a0d948dca08f32ee000dbe" diff --git a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8665_8683.patch b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8665_8683.patch deleted file mode 100644 index 39c5059c7..000000000 --- a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8665_8683.patch +++ /dev/null @@ -1,137 +0,0 @@ -From f94a29a822f5528d2334592760fbb7938f15eb55 Mon Sep 17 00:00:00 2001 -From: erouault <erouault> -Date: Sat, 26 Dec 2015 17:32:03 +0000 -Subject: [PATCH] * libtiff/tif_getimage.c: fix out-of-bound reads in - TIFFRGBAImage interface in case of unsupported values of - SamplesPerPixel/ExtraSamples for LogLUV / CIELab. Add explicit call to - TIFFRGBAImageOK() in TIFFRGBAImageBegin(). Fix CVE-2015-8665 reported by - limingxing and CVE-2015-8683 reported by zzf of Alibaba. 
- -Upstream-Status: Backport -CVE: CVE-2015-8665 -CVE: CVE-2015-8683 -https://github.com/vadz/libtiff/commit/f94a29a822f5528d2334592760fbb7938f15eb55 - -Signed-off-by: Armin Kuster <akuster@mvista.com> - ---- - ChangeLog | 8 ++++++++ - libtiff/tif_getimage.c | 35 ++++++++++++++++++++++------------- - 2 files changed, 30 insertions(+), 13 deletions(-) - -Index: tiff-4.0.6/libtiff/tif_getimage.c -=================================================================== ---- tiff-4.0.6.orig/libtiff/tif_getimage.c -+++ tiff-4.0.6/libtiff/tif_getimage.c -@@ -182,20 +182,22 @@ TIFFRGBAImageOK(TIFF* tif, char emsg[102 - "Planarconfiguration", td->td_planarconfig); - return (0); - } -- if( td->td_samplesperpixel != 3 ) -+ if( td->td_samplesperpixel != 3 || colorchannels != 3 ) - { - sprintf(emsg, -- "Sorry, can not handle image with %s=%d", -- "Samples/pixel", td->td_samplesperpixel); -+ "Sorry, can not handle image with %s=%d, %s=%d", -+ "Samples/pixel", td->td_samplesperpixel, -+ "colorchannels", colorchannels); - return 0; - } - break; - case PHOTOMETRIC_CIELAB: -- if( td->td_samplesperpixel != 3 || td->td_bitspersample != 8 ) -+ if( td->td_samplesperpixel != 3 || colorchannels != 3 || td->td_bitspersample != 8 ) - { - sprintf(emsg, -- "Sorry, can not handle image with %s=%d and %s=%d", -+ "Sorry, can not handle image with %s=%d, %s=%d and %s=%d", - "Samples/pixel", td->td_samplesperpixel, -+ "colorchannels", colorchannels, - "Bits/sample", td->td_bitspersample); - return 0; - } -@@ -255,6 +257,9 @@ TIFFRGBAImageBegin(TIFFRGBAImage* img, T - int colorchannels; - uint16 *red_orig, *green_orig, *blue_orig; - int n_color; -+ -+ if( !TIFFRGBAImageOK(tif, emsg) ) -+ return 0; - - /* Initialize to normal values */ - img->row_offset = 0; -@@ -2508,29 +2513,33 @@ PickContigCase(TIFFRGBAImage* img) - case PHOTOMETRIC_RGB: - switch (img->bitspersample) { - case 8: -- if (img->alpha == EXTRASAMPLE_ASSOCALPHA) -+ if (img->alpha == EXTRASAMPLE_ASSOCALPHA && -+ img->samplesperpixel >= 4) - img->put.contig = putRGBAAcontig8bittile; -- else if (img->alpha == EXTRASAMPLE_UNASSALPHA) -+ else if (img->alpha == EXTRASAMPLE_UNASSALPHA && -+ img->samplesperpixel >= 4) - { - if (BuildMapUaToAa(img)) - img->put.contig = putRGBUAcontig8bittile; - } -- else -+ else if( img->samplesperpixel >= 3 ) - img->put.contig = putRGBcontig8bittile; - break; - case 16: -- if (img->alpha == EXTRASAMPLE_ASSOCALPHA) -+ if (img->alpha == EXTRASAMPLE_ASSOCALPHA && -+ img->samplesperpixel >=4 ) - { - if (BuildMapBitdepth16To8(img)) - img->put.contig = putRGBAAcontig16bittile; - } -- else if (img->alpha == EXTRASAMPLE_UNASSALPHA) -+ else if (img->alpha == EXTRASAMPLE_UNASSALPHA && -+ img->samplesperpixel >=4 ) - { - if (BuildMapBitdepth16To8(img) && - BuildMapUaToAa(img)) - img->put.contig = putRGBUAcontig16bittile; - } -- else -+ else if( img->samplesperpixel >=3 ) - { - if (BuildMapBitdepth16To8(img)) - img->put.contig = putRGBcontig16bittile; -@@ -2539,7 +2548,7 @@ PickContigCase(TIFFRGBAImage* img) - } - break; - case PHOTOMETRIC_SEPARATED: -- if (buildMap(img)) { -+ if (img->samplesperpixel >=4 && buildMap(img)) { - if (img->bitspersample == 8) { - if (!img->Map) - img->put.contig = putRGBcontig8bitCMYKtile; -@@ -2635,7 +2644,7 @@ PickContigCase(TIFFRGBAImage* img) - } - break; - case PHOTOMETRIC_CIELAB: -- if (buildMap(img)) { -+ if (img->samplesperpixel == 3 && buildMap(img)) { - if (img->bitspersample == 8) - img->put.contig = initCIELabConversion(img); - break; -Index: tiff-4.0.6/ChangeLog 
-=================================================================== ---- tiff-4.0.6.orig/ChangeLog -+++ tiff-4.0.6/ChangeLog -@@ -1,3 +1,11 @@ -+2015-12-26 Even Rouault <even.rouault at spatialys.com> -+ -+ * libtiff/tif_getimage.c: fix out-of-bound reads in TIFFRGBAImage -+ interface in case of unsupported values of SamplesPerPixel/ExtraSamples -+ for LogLUV / CIELab. Add explicit call to TIFFRGBAImageOK() in -+ TIFFRGBAImageBegin(). Fix CVE-2015-8665 reported by limingxing and -+ CVE-2015-8683 reported by zzf of Alibaba. -+ - 2015-09-12 Bob Friesenhahn <bfriesen@simple.dallas.tx.us> - - * libtiff 4.0.6 released. diff --git a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8781.patch b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8781.patch deleted file mode 100644 index 0846f0f68..000000000 --- a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8781.patch +++ /dev/null @@ -1,195 +0,0 @@ -From aaab5c3c9d2a2c6984f23ccbc79702610439bc65 Mon Sep 17 00:00:00 2001 -From: erouault <erouault> -Date: Sun, 27 Dec 2015 16:25:11 +0000 -Subject: [PATCH] * libtiff/tif_luv.c: fix potential out-of-bound writes in - decode functions in non debug builds by replacing assert()s by regular if - checks (bugzilla #2522). Fix potential out-of-bound reads in case of short - input data. - -Upstream-Status: Backport - -https://github.com/vadz/libtiff/commit/aaab5c3c9d2a2c6984f23ccbc79702610439bc65 -hand applied Changelog changes - -CVE: CVE-2015-8781 - -Signed-off-by: Armin Kuster <akuster@mvista.com> ---- - ChangeLog | 7 +++++++ - libtiff/tif_luv.c | 55 ++++++++++++++++++++++++++++++++++++++++++++----------- - 2 files changed, 51 insertions(+), 11 deletions(-) - -Index: tiff-4.0.4/ChangeLog -=================================================================== ---- tiff-4.0.4.orig/ChangeLog -+++ tiff-4.0.4/ChangeLog -@@ -1,3 +1,10 @@ -+2015-12-27 Even Rouault <even.rouault at spatialys.com> -+ -+ * libtiff/tif_luv.c: fix potential out-of-bound writes in decode -+ functions in non debug builds by replacing assert()s by regular if -+ checks (bugzilla #2522). -+ Fix potential out-of-bound reads in case of short input data. 
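[Editorial note] The key point in the CVE-2015-8781 description above is that assert() is compiled out when NDEBUG is defined, so a non-debug build silently loses the bounds check; the patch therefore swaps assert()s for ordinary if checks that fail cleanly at runtime. A generic C sketch of the difference, assuming a simple copy-style decoder — illustrative only, not the libtiff source:

/*
 * Why assert() is not a bounds check in release builds.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

/* Debug-only protection: with -DNDEBUG the memcpy runs unconditionally. */
static int decode_with_assert(char *dst, size_t dst_len,
                              const char *src, size_t n)
{
    assert(dst_len >= n);
    memcpy(dst, src, n);
    return 1;
}

/* Patched style: a regular runtime check that reports an error and refuses
 * to decode, in every build configuration. */
static int decode_with_check(char *dst, size_t dst_len,
                             const char *src, size_t n)
{
    if (dst_len < n) {
        fprintf(stderr, "translation buffer too short\n");
        return 0;
    }
    memcpy(dst, src, n);
    return 1;
}

int main(void)
{
    char small[8];
    const char input[32] = "much larger than the buffer";

    /* Safe in all builds: the short buffer is rejected. */
    if (!decode_with_check(small, sizeof small, input, sizeof input))
        fprintf(stderr, "decode refused, as expected\n");

    /* Only safe while assertions are enabled; out of bounds under NDEBUG: */
    /* decode_with_assert(small, sizeof small, input, sizeof input); */
    (void)decode_with_assert;
    return 0;
}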
-+ - 2015-12-26 Even Rouault <even.rouault at spatialys.com> - - * libtiff/tif_getimage.c: fix out-of-bound reads in TIFFRGBAImage -Index: tiff-4.0.4/libtiff/tif_luv.c -=================================================================== ---- tiff-4.0.4.orig/libtiff/tif_luv.c -+++ tiff-4.0.4/libtiff/tif_luv.c -@@ -202,7 +202,11 @@ LogL16Decode(TIFF* tif, uint8* op, tmsiz - if (sp->user_datafmt == SGILOGDATAFMT_16BIT) - tp = (int16*) op; - else { -- assert(sp->tbuflen >= npixels); -+ if(sp->tbuflen < npixels) { -+ TIFFErrorExt(tif->tif_clientdata, module, -+ "Translation buffer too short"); -+ return (0); -+ } - tp = (int16*) sp->tbuf; - } - _TIFFmemset((void*) tp, 0, npixels*sizeof (tp[0])); -@@ -211,9 +215,11 @@ LogL16Decode(TIFF* tif, uint8* op, tmsiz - cc = tif->tif_rawcc; - /* get each byte string */ - for (shft = 2*8; (shft -= 8) >= 0; ) { -- for (i = 0; i < npixels && cc > 0; ) -+ for (i = 0; i < npixels && cc > 0; ) { - if (*bp >= 128) { /* run */ -- rc = *bp++ + (2-128); /* TODO: potential input buffer overrun when decoding corrupt or truncated data */ -+ if( cc < 2 ) -+ break; -+ rc = *bp++ + (2-128); - b = (int16)(*bp++ << shft); - cc -= 2; - while (rc-- && i < npixels) -@@ -223,6 +229,7 @@ LogL16Decode(TIFF* tif, uint8* op, tmsiz - while (--cc && rc-- && i < npixels) - tp[i++] |= (int16)*bp++ << shft; - } -+ } - if (i != npixels) { - #if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__)) - TIFFErrorExt(tif->tif_clientdata, module, -@@ -268,13 +275,17 @@ LogLuvDecode24(TIFF* tif, uint8* op, tms - if (sp->user_datafmt == SGILOGDATAFMT_RAW) - tp = (uint32 *)op; - else { -- assert(sp->tbuflen >= npixels); -+ if(sp->tbuflen < npixels) { -+ TIFFErrorExt(tif->tif_clientdata, module, -+ "Translation buffer too short"); -+ return (0); -+ } - tp = (uint32 *) sp->tbuf; - } - /* copy to array of uint32 */ - bp = (unsigned char*) tif->tif_rawcp; - cc = tif->tif_rawcc; -- for (i = 0; i < npixels && cc > 0; i++) { -+ for (i = 0; i < npixels && cc >= 3; i++) { - tp[i] = bp[0] << 16 | bp[1] << 8 | bp[2]; - bp += 3; - cc -= 3; -@@ -325,7 +336,11 @@ LogLuvDecode32(TIFF* tif, uint8* op, tms - if (sp->user_datafmt == SGILOGDATAFMT_RAW) - tp = (uint32*) op; - else { -- assert(sp->tbuflen >= npixels); -+ if(sp->tbuflen < npixels) { -+ TIFFErrorExt(tif->tif_clientdata, module, -+ "Translation buffer too short"); -+ return (0); -+ } - tp = (uint32*) sp->tbuf; - } - _TIFFmemset((void*) tp, 0, npixels*sizeof (tp[0])); -@@ -334,11 +349,13 @@ LogLuvDecode32(TIFF* tif, uint8* op, tms - cc = tif->tif_rawcc; - /* get each byte string */ - for (shft = 4*8; (shft -= 8) >= 0; ) { -- for (i = 0; i < npixels && cc > 0; ) -+ for (i = 0; i < npixels && cc > 0; ) { - if (*bp >= 128) { /* run */ -+ if( cc < 2 ) -+ break; - rc = *bp++ + (2-128); - b = (uint32)*bp++ << shft; -- cc -= 2; /* TODO: potential input buffer overrun when decoding corrupt or truncated data */ -+ cc -= 2; - while (rc-- && i < npixels) - tp[i++] |= b; - } else { /* non-run */ -@@ -346,6 +363,7 @@ LogLuvDecode32(TIFF* tif, uint8* op, tms - while (--cc && rc-- && i < npixels) - tp[i++] |= (uint32)*bp++ << shft; - } -+ } - if (i != npixels) { - #if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__)) - TIFFErrorExt(tif->tif_clientdata, module, -@@ -413,6 +431,7 @@ LogLuvDecodeTile(TIFF* tif, uint8* bp, t - static int - LogL16Encode(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) - { -+ static const char module[] = "LogL16Encode"; - LogLuvState* sp = EncoderState(tif); - int shft; - tmsize_t i; -@@ -433,7 +452,11 @@ 
LogL16Encode(TIFF* tif, uint8* bp, tmsiz - tp = (int16*) bp; - else { - tp = (int16*) sp->tbuf; -- assert(sp->tbuflen >= npixels); -+ if(sp->tbuflen < npixels) { -+ TIFFErrorExt(tif->tif_clientdata, module, -+ "Translation buffer too short"); -+ return (0); -+ } - (*sp->tfunc)(sp, bp, npixels); - } - /* compress each byte string */ -@@ -506,6 +529,7 @@ LogL16Encode(TIFF* tif, uint8* bp, tmsiz - static int - LogLuvEncode24(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) - { -+ static const char module[] = "LogLuvEncode24"; - LogLuvState* sp = EncoderState(tif); - tmsize_t i; - tmsize_t npixels; -@@ -521,7 +545,11 @@ LogLuvEncode24(TIFF* tif, uint8* bp, tms - tp = (uint32*) bp; - else { - tp = (uint32*) sp->tbuf; -- assert(sp->tbuflen >= npixels); -+ if(sp->tbuflen < npixels) { -+ TIFFErrorExt(tif->tif_clientdata, module, -+ "Translation buffer too short"); -+ return (0); -+ } - (*sp->tfunc)(sp, bp, npixels); - } - /* write out encoded pixels */ -@@ -553,6 +581,7 @@ LogLuvEncode24(TIFF* tif, uint8* bp, tms - static int - LogLuvEncode32(TIFF* tif, uint8* bp, tmsize_t cc, uint16 s) - { -+ static const char module[] = "LogLuvEncode32"; - LogLuvState* sp = EncoderState(tif); - int shft; - tmsize_t i; -@@ -574,7 +603,11 @@ LogLuvEncode32(TIFF* tif, uint8* bp, tms - tp = (uint32*) bp; - else { - tp = (uint32*) sp->tbuf; -- assert(sp->tbuflen >= npixels); -+ if(sp->tbuflen < npixels) { -+ TIFFErrorExt(tif->tif_clientdata, module, -+ "Translation buffer too short"); -+ return (0); -+ } - (*sp->tfunc)(sp, bp, npixels); - } - /* compress each byte string */ diff --git a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8784.patch b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8784.patch deleted file mode 100644 index 0caf800e2..000000000 --- a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2015-8784.patch +++ /dev/null @@ -1,73 +0,0 @@ -From b18012dae552f85dcc5c57d3bf4e997a15b1cc1c Mon Sep 17 00:00:00 2001 -From: erouault <erouault> -Date: Sun, 27 Dec 2015 16:55:20 +0000 -Subject: [PATCH] * libtiff/tif_next.c: fix potential out-of-bound write in - NeXTDecode() triggered by http://lcamtuf.coredump.cx/afl/vulns/libtiff5.tif - (bugzilla #2508) - -Upstream-Status: Backport -https://github.com/vadz/libtiff/commit/b18012dae552f85dcc5c57d3bf4e997a15b1cc1c -hand applied Changelog changes - -CVE: CVE-2015-8784 -Signed-off-by: Armin Kuster <akuster@mvista.com> - ---- - ChangeLog | 6 ++++++ - libtiff/tif_next.c | 10 ++++++++-- - 2 files changed, 14 insertions(+), 2 deletions(-) - -Index: tiff-4.0.4/ChangeLog -=================================================================== ---- tiff-4.0.4.orig/ChangeLog -+++ tiff-4.0.4/ChangeLog -@@ -1,5 +1,11 @@ - 2015-12-27 Even Rouault <even.rouault at spatialys.com> - -+ * libtiff/tif_next.c: fix potential out-of-bound write in NeXTDecode() -+ triggered by http://lcamtuf.coredump.cx/afl/vulns/libtiff5.tif -+ (bugzilla #2508) -+ -+2015-12-27 Even Rouault <even.rouault at spatialys.com> -+ - * libtiff/tif_luv.c: fix potential out-of-bound writes in decode - functions in non debug builds by replacing assert()s by regular if - checks (bugzilla #2522). 
-Index: tiff-4.0.4/libtiff/tif_next.c -=================================================================== ---- tiff-4.0.4.orig/libtiff/tif_next.c -+++ tiff-4.0.4/libtiff/tif_next.c -@@ -37,7 +37,7 @@ - case 0: op[0] = (unsigned char) ((v) << 6); break; \ - case 1: op[0] |= (v) << 4; break; \ - case 2: op[0] |= (v) << 2; break; \ -- case 3: *op++ |= (v); break; \ -+ case 3: *op++ |= (v); op_offset++; break; \ - } \ - } - -@@ -106,6 +106,7 @@ NeXTDecode(TIFF* tif, uint8* buf, tmsize - uint32 imagewidth = tif->tif_dir.td_imagewidth; - if( isTiled(tif) ) - imagewidth = tif->tif_dir.td_tilewidth; -+ tmsize_t op_offset = 0; - - /* - * The scanline is composed of a sequence of constant -@@ -122,10 +123,15 @@ NeXTDecode(TIFF* tif, uint8* buf, tmsize - * bounds, potentially resulting in a security - * issue. - */ -- while (n-- > 0 && npixels < imagewidth) -+ while (n-- > 0 && npixels < imagewidth && op_offset < scanline) - SETPIXEL(op, grey); - if (npixels >= imagewidth) - break; -+ if (op_offset >= scanline ) { -+ TIFFErrorExt(tif->tif_clientdata, module, "Invalid data for scanline %ld", -+ (long) tif->tif_row); -+ return (0); -+ } - if (cc == 0) - goto bad; - n = *bp++, cc--; diff --git a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-3186.patch b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-3186.patch deleted file mode 100644 index 4a08aba21..000000000 --- a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-3186.patch +++ /dev/null @@ -1,24 +0,0 @@ -Buffer overflow in the readextension function in gif2tiff.c -allows remote attackers to cause a denial of service via a crafted GIF file. - -External References: -https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2016-3186 -https://bugzilla.redhat.com/show_bug.cgi?id=1319503 - -CVE: CVE-2016-3186 -Upstream-Status: Backport (RedHat) -https://bugzilla.redhat.com/attachment.cgi?id=1144235&action=diff - -Signed-off-by: Yi Zhao <yi.zhao@windirver.com> - ---- tiff-4.0.6/tools/gif2tiff.c 2016-04-06 15:43:01.586048341 +0200 -+++ tiff-4.0.6/tools/gif2tiff.c 2016-04-06 15:48:05.523207710 +0200 -@@ -349,7 +349,7 @@ - int status = 1; - - (void) getc(infile); -- while ((count = getc(infile)) && count <= 255) -+ while ((count = getc(infile)) && count >= 0 && count <= 255) - if (fread(buf, 1, count, infile) != (size_t) count) { - fprintf(stderr, "short read from file %s (%s)\n", - filename, strerror(errno)); diff --git a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-5321.patch b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-5321.patch deleted file mode 100644 index 63c665024..000000000 --- a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-5321.patch +++ /dev/null @@ -1,49 +0,0 @@ -From d9783e4a1476b6787a51c5ae9e9b3156527589f0 Mon Sep 17 00:00:00 2001 -From: erouault <erouault> -Date: Mon, 11 Jul 2016 21:26:03 +0000 -Subject: [PATCH 1/2] * tools/tiffcrop.c: Avoid access outside of stack - allocated array on a tiled separate TIFF with more than 8 samples per pixel. 
- Reported by Kaixiang Zhang of the Cloud Security Team, Qihoo 360 - (CVE-2016-5321, bugzilla #2558) - -CVE: CVE-2016-5321 -Upstream-Status: Backport -https://github.com/vadz/libtiff/commit/d9783e4a1476b6787a51c5ae9e9b3156527589f0 - -Signed-off-by: Yi Zhao <yi.zhao@windirver.com> ---- - ChangeLog | 7 +++++++ - tools/tiffcrop.c | 2 +- - 2 files changed, 8 insertions(+), 1 deletion(-) - -diff --git a/ChangeLog b/ChangeLog -index e98d54d..4e0302f 100644 ---- a/ChangeLog -+++ b/ChangeLog -@@ -1,3 +1,10 @@ -+2016-07-11 Even Rouault <even.rouault at spatialys.com> -+ -+ * tools/tiffcrop.c: Avoid access outside of stack allocated array -+ on a tiled separate TIFF with more than 8 samples per pixel. -+ Reported by Kaixiang Zhang of the Cloud Security Team, Qihoo 360 -+ (CVE-2016-5321, bugzilla #2558) -+ - 2015-12-27 Even Rouault <even.rouault at spatialys.com> - - * libtiff/tif_next.c: fix potential out-of-bound write in NeXTDecode() -diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c -index d959ae3..6fc8fc1 100644 ---- a/tools/tiffcrop.c -+++ b/tools/tiffcrop.c -@@ -989,7 +989,7 @@ static int readSeparateTilesIntoBuffer (TIFF* in, uint8 *obuf, - nrow = (row + tl > imagelength) ? imagelength - row : tl; - for (col = 0; col < imagewidth; col += tw) - { -- for (s = 0; s < spp; s++) -+ for (s = 0; s < spp && s < MAX_SAMPLES; s++) - { /* Read each plane of a tile set into srcbuffs[s] */ - tbytes = TIFFReadTile(in, srcbuffs[s], col, row, 0, s); - if (tbytes < 0 && !ignore) --- -2.7.4 - diff --git a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-5323.patch b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-5323.patch deleted file mode 100644 index 41eab91ab..000000000 --- a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/files/CVE-2016-5323.patch +++ /dev/null @@ -1,107 +0,0 @@ -From 2f79856097f423eb33796a15fcf700d2ea41bf31 Mon Sep 17 00:00:00 2001 -From: erouault <erouault> -Date: Mon, 11 Jul 2016 21:38:31 +0000 -Subject: [PATCH 2/2] (CVE-2016-5321 / CVE-2016-5323 , bugzilla #2558 / #2559) - -CVE: CVE-2016-5323 -Upstream-Status: Backport -https://github.com/vadz/libtiff/commit/2f79856097f423eb33796a15fcf700d2ea41bf31 - -Signed-off-by: Yi Zhao <yi.zhao@windirver.com> ---- - ChangeLog | 2 +- - tools/tiffcrop.c | 16 ++++++++-------- - 2 files changed, 9 insertions(+), 9 deletions(-) - -diff --git a/ChangeLog b/ChangeLog -index 4e0302f..62dc1b5 100644 ---- a/ChangeLog -+++ b/ChangeLog -@@ -3,7 +3,7 @@ - * tools/tiffcrop.c: Avoid access outside of stack allocated array - on a tiled separate TIFF with more than 8 samples per pixel. 
- Reported by Kaixiang Zhang of the Cloud Security Team, Qihoo 360 -- (CVE-2016-5321, bugzilla #2558) -+ (CVE-2016-5321 / CVE-2016-5323 , bugzilla #2558 / #2559) - - 2016-07-10 Even Rouault <even.rouault at spatialys.com> - -diff --git a/tools/tiffcrop.c b/tools/tiffcrop.c -index 6fc8fc1..27abc0b 100644 ---- a/tools/tiffcrop.c -+++ b/tools/tiffcrop.c -@@ -3738,7 +3738,7 @@ combineSeparateSamples8bits (uint8 *in[], uint8 *out, uint32 cols, - - matchbits = maskbits << (8 - src_bit - bps); - /* load up next sample from each plane */ -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - buff1 = ((*src) & matchbits) << (src_bit); -@@ -3837,7 +3837,7 @@ combineSeparateSamples16bits (uint8 *in[], uint8 *out, uint32 cols, - src_bit = bit_offset % 8; - - matchbits = maskbits << (16 - src_bit - bps); -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - if (little_endian) -@@ -3947,7 +3947,7 @@ combineSeparateSamples24bits (uint8 *in[], uint8 *out, uint32 cols, - src_bit = bit_offset % 8; - - matchbits = maskbits << (32 - src_bit - bps); -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - if (little_endian) -@@ -4073,7 +4073,7 @@ combineSeparateSamples32bits (uint8 *in[], uint8 *out, uint32 cols, - src_bit = bit_offset % 8; - - matchbits = maskbits << (64 - src_bit - bps); -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - if (little_endian) -@@ -4263,7 +4263,7 @@ combineSeparateTileSamples8bits (uint8 *in[], uint8 *out, uint32 cols, - - matchbits = maskbits << (8 - src_bit - bps); - /* load up next sample from each plane */ -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - buff1 = ((*src) & matchbits) << (src_bit); -@@ -4362,7 +4362,7 @@ combineSeparateTileSamples16bits (uint8 *in[], uint8 *out, uint32 cols, - src_bit = bit_offset % 8; - - matchbits = maskbits << (16 - src_bit - bps); -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - if (little_endian) -@@ -4471,7 +4471,7 @@ combineSeparateTileSamples24bits (uint8 *in[], uint8 *out, uint32 cols, - src_bit = bit_offset % 8; - - matchbits = maskbits << (32 - src_bit - bps); -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - if (little_endian) -@@ -4597,7 +4597,7 @@ combineSeparateTileSamples32bits (uint8 *in[], uint8 *out, uint32 cols, - src_bit = bit_offset % 8; - - matchbits = maskbits << (64 - src_bit - bps); -- for (s = 0; s < spp; s++) -+ for (s = 0; (s < spp) && (s < MAX_SAMPLES); s++) - { - src = in[s] + src_offset + src_byte; - if (little_endian) --- -2.7.4 - diff --git a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.6.bb b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.7.bb index 8147bc4fb..729678208 100644 --- a/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.6.bb +++ b/import-layers/yocto-poky/meta/recipes-multimedia/libtiff/tiff_4.0.7.bb @@ -2,18 +2,14 @@ SUMMARY = "Provides support for the Tag Image File Format (TIFF)" LICENSE = "BSD-2-Clause" LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=34da3db46fab7501992f9615d7e158cf" +CVE_PRODUCT = "libtiff" + SRC_URI = 
"http://download.osgeo.org/libtiff/tiff-${PV}.tar.gz \ file://libtool2.patch \ - file://CVE-2015-8665_8683.patch \ - file://CVE-2015-8781.patch \ - file://CVE-2015-8784.patch \ - file://CVE-2016-3186.patch \ - file://CVE-2016-5321.patch \ - file://CVE-2016-5323.patch \ " -SRC_URI[md5sum] = "d1d2e940dea0b5ad435f21f03d96dd72" -SRC_URI[sha256sum] = "4d57a50907b510e3049a4bba0d7888930fdfc16ce49f1bf693e5b6247370d68c" +SRC_URI[md5sum] = "77ae928d2c6b7fb46a21c3a29325157b" +SRC_URI[sha256sum] = "9f43a2cfb9589e5cecaa66e16bf87f814c945f22df7ba600d63aac4632c4f019" # exclude betas UPSTREAM_CHECK_REGEX = "tiff-(?P<pver>\d+(\.\d+)+).tar" diff --git a/import-layers/yocto-poky/meta/recipes-rt/rt-tests/hwlatdetect_2.0.bb b/import-layers/yocto-poky/meta/recipes-rt/rt-tests/hwlatdetect_1.1.bb index 012b2dd0a..012b2dd0a 100644 --- a/import-layers/yocto-poky/meta/recipes-rt/rt-tests/hwlatdetect_2.0.bb +++ b/import-layers/yocto-poky/meta/recipes-rt/rt-tests/hwlatdetect_1.1.bb diff --git a/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests.inc b/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests.inc index d832828e9..5606d8e38 100644 --- a/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests.inc +++ b/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests.inc @@ -1,5 +1,6 @@ -# Version v0.96 -SRCREV = "e1b1537a20b35af75a49bf55dcf70296f8a62467" +# Version 1.1 +SRCREV = "dff174f994f547a5785d32454865f140daacb0f5" +PE = "1" SRC_URI = "git://git.kernel.org/pub/scm/utils/rt-tests/rt-tests.git" diff --git a/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests_2.0.bb b/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests_1.1.bb index 4336c50d6..4336c50d6 100644 --- a/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests_2.0.bb +++ b/import-layers/yocto-poky/meta/recipes-rt/rt-tests/rt-tests_1.1.bb diff --git a/import-layers/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.12.5.bb b/import-layers/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.12.5.bb index 941d0e206..11c91c1d0 100644 --- a/import-layers/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.12.5.bb +++ b/import-layers/yocto-poky/meta/recipes-sato/webkit/webkitgtk_2.12.5.bb @@ -24,7 +24,7 @@ SRC_URI = "\ SRC_URI[md5sum] = "7a9ea00ec195488db90fdeb2d174ddaf" SRC_URI[sha256sum] = "6b147854b864a5f115fadb97b2b6200b2f696db015216a34e7298d11c88b1c40" -inherit cmake lib_package pkgconfig gobject-introspection perlnative distro_features_check upstream-version-is-even gtk-doc +inherit cmake pkgconfig gobject-introspection perlnative distro_features_check upstream-version-is-even gtk-doc # We cannot inherit pythonnative because that would conflict with inheriting python3native # (which is done by gobject-introspection). 
But webkit only needs the path to native Python 2.x binary diff --git a/import-layers/yocto-poky/meta/recipes-support/attr/ea-acl.inc b/import-layers/yocto-poky/meta/recipes-support/attr/ea-acl.inc index 370e16f4a..b3ca65e68 100644 --- a/import-layers/yocto-poky/meta/recipes-support/attr/ea-acl.inc +++ b/import-layers/yocto-poky/meta/recipes-support/attr/ea-acl.inc @@ -10,6 +10,7 @@ inherit autotools-brokensep gettext EXTRA_AUTORECONF += "--exclude=autoheader" EXTRA_OECONF = "INSTALL_USER=root INSTALL_GROUP=root" EXTRA_OECONF_append_class-native = " --enable-gettext=no" +EXTRA_OECONF_append_class-target = "${@['', ' --disable-gettext '][(d.getVar('USE_NLS', True) == 'no')]}" EXTRA_OEMAKE = "PKG_LIB_DIR=${base_libdir} PKG_DEVLIB_DIR=${libdir}" @@ -17,36 +18,33 @@ do_install () { oe_runmake install install-lib install-dev DIST_ROOT="${D}" } -PACKAGES =+ "lib${BPN}" - -FILES_lib${BPN} = "${base_libdir}/lib*${SOLIBS}" - -BBCLASSEXTEND = "native" -# Only append ldflags for target recipe and if USE_NLS is enabled -LDFLAGS_append_libc-uclibc_class-target = "${@['', ' -lintl '][(d.getVar('USE_NLS', True) == 'yes')]}" -EXTRA_OECONF_append_libc-uclibc_class-target = "${@['', ' --disable-gettext '][(d.getVar('USE_NLS', True) == 'no')]}" - -fix_symlink () { - if [ "${BB_CURRENTTASK}" != "populate_sysroot" -a "${BB_CURRENTTASK}" != "populate_sysroot_setscene" ] - then - return - fi - +do_install_append_class-native () { if test "${libdir}" = "${base_libdir}" ; then return fi + librelpath=${@os.path.relpath(d.getVar('libdir',True), d.getVar('base_libdir', True))} + baselibrelpath=${@os.path.relpath(d.getVar('base_libdir',True), d.getVar('libdir', True))} + # Remove bad symlinks & create the correct symlinks - if test -L ${libdir}/lib${BPN}.so ; then - rm -rf ${libdir}/lib${BPN}.so - ln -sf ${base_libdir}/lib${BPN}.so ${libdir}/lib${BPN}.so + if test -L ${D}${libdir}/lib${BPN}.so ; then + rm -rf ${D}${libdir}/lib${BPN}.so + ln -sf $baselibrelpath/lib${BPN}.so ${D}${libdir}/lib${BPN}.so fi - if test -L ${base_libdir}/lib${BPN}.a ; then - rm -rf ${base_libdir}/lib${BPN}.a - ln -sf ${libdir}/lib${BPN}.a ${base_libdir}/lib${BPN}.a + if test -L ${D}${base_libdir}/lib${BPN}.a ; then + rm -rf ${D}${base_libdir}/lib${BPN}.a + ln -sf $librelpath/lib${BPN}.a ${D}${base_libdir}/lib${BPN}.a fi - if test -L ${base_libdir}/lib${BPN}.la ; then - rm -rf ${base_libdir}/lib${BPN}.la - ln -sf ${libdir}/lib${BPN}.la ${base_libdir}/lib${BPN}.la + if test -L ${D}${base_libdir}/lib${BPN}.la ; then + rm -rf ${D}${base_libdir}/lib${BPN}.la + ln -sf $librelpath/lib${BPN}.la ${D}${base_libdir}/lib${BPN}.la fi } -SSTATEPOSTINSTFUNCS_class-native += "fix_symlink" + +PACKAGES =+ "lib${BPN}" + +FILES_lib${BPN} = "${base_libdir}/lib*${SOLIBS}" + +BBCLASSEXTEND = "native" +# Only append ldflags for target recipe and if USE_NLS is enabled +LDFLAGS_append_libc-uclibc_class-target = "${@['', ' -lintl '][(d.getVar('USE_NLS', True) == 'yes')]}" +EXTRA_OECONF_append_libc-uclibc_class-target = "${@['', ' --disable-gettext '][(d.getVar('USE_NLS', True) == 'no')]}" diff --git a/import-layers/yocto-poky/meta/recipes-support/boost/boost.inc b/import-layers/yocto-poky/meta/recipes-support/boost/boost.inc index ef16533ba..1966d3d80 100644 --- a/import-layers/yocto-poky/meta/recipes-support/boost/boost.inc +++ b/import-layers/yocto-poky/meta/recipes-support/boost/boost.inc @@ -34,6 +34,8 @@ BOOST_LIBS_append_x86-64 = " context coroutine" BOOST_LIBS_append_powerpc = " context coroutine" # need consistent settings for native builds (x86 override not 
applied for native) BOOST_LIBS_remove_class-native = " context coroutine" +# does not compile +BOOST_LIBS_remove_mips16e = "wave" # optional libraries PACKAGECONFIG ??= "locale" diff --git a/import-layers/yocto-poky/meta/recipes-support/curl/curl_7.50.1.bb b/import-layers/yocto-poky/meta/recipes-support/curl/curl_7.50.1.bb index a21419a4d..653fa2e7a 100644 --- a/import-layers/yocto-poky/meta/recipes-support/curl/curl_7.50.1.bb +++ b/import-layers/yocto-poky/meta/recipes-support/curl/curl_7.50.1.bb @@ -17,6 +17,7 @@ SRC_URI += " file://configure_ac.patch" SRC_URI[md5sum] = "015f6a0217ca6f2c5442ca406476920b" SRC_URI[sha256sum] = "3c12c5f54ccaa1d40abc65d672107dcc75d3e1fcb38c267484334280096e5156" +CVE_PRODUCT = "libcurl" inherit autotools pkgconfig binconfig multilib_header PACKAGECONFIG ??= "${@bb.utils.contains("DISTRO_FEATURES", "ipv6", "ipv6", "", d)} gnutls proxy zlib" diff --git a/import-layers/yocto-poky/meta/recipes-support/db/db_6.0.35.bb b/import-layers/yocto-poky/meta/recipes-support/db/db_6.0.35.bb index 32afbe82d..0f69cab76 100644 --- a/import-layers/yocto-poky/meta/recipes-support/db/db_6.0.35.bb +++ b/import-layers/yocto-poky/meta/recipes-support/db/db_6.0.35.bb @@ -77,15 +77,6 @@ do_configure() { oe_runconf } -# Override the MUTEX setting here, the POSIX library is -# the default - "POSIX/pthreads/library". -# Don't ignore the nice SWP instruction on the ARM: -# These enable the ARM assembler mutex code -ARM_MUTEX = "--with-mutex=ARM/gcc-assembly" -MUTEX = "" -MUTEX_arm = "${ARM_MUTEX}" -MUTEX_armeb = "${ARM_MUTEX}" -EXTRA_OECONF += "${MUTEX}" EXTRA_OEMAKE_append_class-target = " LIBTOOL=${STAGING_BINDIR_CROSS}/${HOST_SYS}-libtool" EXTRA_OEMAKE += "STRIP=true" diff --git a/import-layers/yocto-poky/meta/recipes-support/gmp/gmp_6.1.1.bb b/import-layers/yocto-poky/meta/recipes-support/gmp/gmp_6.1.1.bb index 303c3ef9e..614d21ab7 100644 --- a/import-layers/yocto-poky/meta/recipes-support/gmp/gmp_6.1.1.bb +++ b/import-layers/yocto-poky/meta/recipes-support/gmp/gmp_6.1.1.bb @@ -21,6 +21,7 @@ SRC_URI[sha256sum] = "a8109865f2893f1373b0a8ed5ff7429de8db696fc451b1036bd7bdf95b acpaths = "" EXTRA_OECONF += " --enable-cxx=detect" +EXTRA_OECONF_mipsarchr6_append = " --disable-assembly" PACKAGES =+ "libgmpxx" FILES_libgmpxx = "${libdir}/libgmpxx${SOLIBS}" diff --git a/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0001-Replace-__BEGIN_DECLS-and-__END_DECLS.patch b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0001-Replace-__BEGIN_DECLS-and-__END_DECLS.patch new file mode 100644 index 000000000..e97e30e84 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0001-Replace-__BEGIN_DECLS-and-__END_DECLS.patch @@ -0,0 +1,363 @@ +From 88adbe1a855b7aa95bd925c80ed83c86f3fc42e3 Mon Sep 17 00:00:00 2001 +From: Khem Raj <raj.khem@gmail.com> +Date: Sun, 6 Nov 2016 09:39:31 -0800 +Subject: [PATCH 1/3] Replace __BEGIN_DECLS and __END_DECLS + +Signed-off-by: Khem Raj <raj.khem@gmail.com> +--- +Upstream-Status: Pending + + include/bsd/err.h | 10 ++++++++-- + include/bsd/libutil.h | 10 ++++++++-- + include/bsd/md5.h | 10 ++++++++-- + include/bsd/nlist.h | 10 ++++++++-- + include/bsd/readpassphrase.h | 10 ++++++++-- + include/bsd/stdio.h | 10 ++++++++-- + include/bsd/stdlib.h | 10 ++++++++-- + include/bsd/string.h | 10 ++++++++-- + include/bsd/stringlist.h | 10 ++++++++-- + include/bsd/unistd.h | 10 ++++++++-- + include/bsd/vis.h | 10 ++++++++-- + include/bsd/wchar.h | 10 ++++++++-- + src/hash/sha512.h | 10 ++++++++-- + 13 files changed, 104 insertions(+), 26 
deletions(-) + +diff --git a/include/bsd/err.h b/include/bsd/err.h +index 12fd051..43dfc32 100644 +--- a/include/bsd/err.h ++++ b/include/bsd/err.h +@@ -42,7 +42,10 @@ + + #include <stdarg.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + void warnc(int code, const char *format, ...) + __printflike(2, 3); + void vwarnc(int code, const char *format, va_list ap) +@@ -51,6 +54,9 @@ void errc(int status, int code, const char *format, ...) + __printflike(3, 4); + void verrc(int status, int code, const char *format, va_list ap) + __printflike(3, 0); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif +diff --git a/include/bsd/libutil.h b/include/bsd/libutil.h +index ebb6160..28b919d 100644 +--- a/include/bsd/libutil.h ++++ b/include/bsd/libutil.h +@@ -53,7 +53,10 @@ struct pidfh { + ino_t pf_ino; + }; + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + int humanize_number(char *buf, size_t len, int64_t bytes, + const char *suffix, int scale, int flags); + int expand_number(const char *_buf, uint64_t *_num); +@@ -66,7 +69,10 @@ int pidfile_close(struct pidfh *pfh); + int pidfile_remove(struct pidfh *pfh); + + char *fparseln(FILE *, size_t *, size_t *, const char[3], int); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + /* humanize_number(3) */ + #define HN_DECIMAL 0x01 +diff --git a/include/bsd/md5.h b/include/bsd/md5.h +index 9a75fad..3531fd6 100644 +--- a/include/bsd/md5.h ++++ b/include/bsd/md5.h +@@ -30,7 +30,10 @@ typedef struct MD5Context { + #include <sys/cdefs.h> + #include <sys/types.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + void MD5Init(MD5_CTX *); + void MD5Update(MD5_CTX *, const uint8_t *, size_t) + __attribute__((__bounded__(__string__,2,3))); +@@ -49,6 +52,9 @@ char *MD5FileChunk(const char *, char *, off_t, off_t) + char *MD5Data(const uint8_t *, size_t, char *) + __attribute__((__bounded__(__string__,1,2))) + __attribute__((__bounded__(__minbytes__,3,MD5_DIGEST_STRING_LENGTH))); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif /* _MD5_H_ */ +diff --git a/include/bsd/nlist.h b/include/bsd/nlist.h +index 2730237..0389ab7 100644 +--- a/include/bsd/nlist.h ++++ b/include/bsd/nlist.h +@@ -30,8 +30,14 @@ + #include <sys/cdefs.h> + #include <a.out.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + extern int nlist(const char *filename, struct nlist *list); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif +diff --git a/include/bsd/readpassphrase.h b/include/bsd/readpassphrase.h +index e1dacc3..76e0d33 100644 +--- a/include/bsd/readpassphrase.h ++++ b/include/bsd/readpassphrase.h +@@ -34,8 +34,14 @@ + #include <sys/cdefs.h> + #include <sys/types.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + char * readpassphrase(const char *, char *, size_t, int); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif /* !_READPASSPHRASE_H_ */ +diff --git a/include/bsd/stdio.h b/include/bsd/stdio.h +index 7697425..b5b3efd 100644 +--- a/include/bsd/stdio.h ++++ b/include/bsd/stdio.h +@@ -41,7 +41,10 @@ + #include <sys/cdefs.h> + #include <sys/types.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + const char *fmtcheck(const char *, const char *); + + /* XXX: The function requires cooperation from the system libc to store the +@@ 
-69,7 +72,10 @@ FILE *funopen(const void *cookie, + #define fwopen(cookie, fn) funopen(cookie, NULL, fn, NULL, NULL) + + int fpurge(FILE *fp); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif + #endif +diff --git a/include/bsd/stdlib.h b/include/bsd/stdlib.h +index 0604cad..b9f0515 100644 +--- a/include/bsd/stdlib.h ++++ b/include/bsd/stdlib.h +@@ -46,7 +46,10 @@ + #include <sys/stat.h> + #include <stdint.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + uint32_t arc4random(void); + void arc4random_stir(void); + void arc4random_addrandom(u_char *dat, int datlen); +@@ -73,6 +76,9 @@ long long strtonum(const char *nptr, long long minval, long long maxval, + const char **errstr); + + char *getbsize(int *headerlenp, long *blocksizep); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif +diff --git a/include/bsd/string.h b/include/bsd/string.h +index ee2f953..fbf8c54 100644 +--- a/include/bsd/string.h ++++ b/include/bsd/string.h +@@ -36,13 +36,19 @@ + #include <sys/cdefs.h> + #include <sys/types.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + size_t strlcpy(char *dst, const char *src, size_t siz); + size_t strlcat(char *dst, const char *src, size_t siz); + char *strnstr(const char *str, const char *find, size_t str_len); + void strmode(mode_t mode, char *str); + + void explicit_bzero(void *buf, size_t len); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif +diff --git a/include/bsd/stringlist.h b/include/bsd/stringlist.h +index e3c42e9..40d0a52 100644 +--- a/include/bsd/stringlist.h ++++ b/include/bsd/stringlist.h +@@ -43,12 +43,18 @@ typedef struct _stringlist { + size_t sl_cur; + } StringList; + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + StringList *sl_init(void); + int sl_add(StringList *, char *); + void sl_free(StringList *, int); + char *sl_find(StringList *, const char *); + int sl_delete(StringList *, const char *, int); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif /* _STRINGLIST_H */ +diff --git a/include/bsd/unistd.h b/include/bsd/unistd.h +index 1f9c5f8..5b2f4c7 100644 +--- a/include/bsd/unistd.h ++++ b/include/bsd/unistd.h +@@ -45,7 +45,10 @@ + #define S_ISTXT S_ISVTX + #endif + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + extern int optreset; + + #ifdef LIBBSD_OVERLAY +@@ -68,6 +71,9 @@ void setproctitle(const char *fmt, ...) 
+ __printflike(1, 2); + + int getpeereid(int s, uid_t *euid, gid_t *egid); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif +diff --git a/include/bsd/vis.h b/include/bsd/vis.h +index 835d2d6..63c951e 100644 +--- a/include/bsd/vis.h ++++ b/include/bsd/vis.h +@@ -74,7 +74,10 @@ + + #include <sys/cdefs.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + char *vis(char *, int, int, int); + int strvis(char *, const char *, int); + int strvisx(char *, const char *, size_t, int); +@@ -83,6 +86,9 @@ int strunvis(char *, const char *); + int strunvisx(char *, const char *, int); + ssize_t strnunvis(char *, const char *, size_t); + int unvis(char *, int, int *, int); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif /* !_VIS_H_ */ +diff --git a/include/bsd/wchar.h b/include/bsd/wchar.h +index 33a500e..aa70742 100644 +--- a/include/bsd/wchar.h ++++ b/include/bsd/wchar.h +@@ -43,12 +43,18 @@ + #include <sys/cdefs.h> + #include <sys/types.h> + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + wchar_t *fgetwln(FILE *stream, size_t *len); + + size_t wcslcat(wchar_t *dst, const wchar_t *src, size_t size); + size_t wcslcpy(wchar_t *dst, const wchar_t *src, size_t size); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif + #endif +diff --git a/src/hash/sha512.h b/src/hash/sha512.h +index 4f368a1..27ddc24 100644 +--- a/src/hash/sha512.h ++++ b/src/hash/sha512.h +@@ -39,7 +39,10 @@ typedef struct SHA512Context { + unsigned char buf[128]; + } SHA512_CTX; + +-__BEGIN_DECLS ++/* __BEGIN_DECLS */ ++#ifdef __cplusplus ++extern "C" { ++#endif + + void SHA512_Init(SHA512_CTX *); + void SHA512_Update(SHA512_CTX *, const void *, size_t); +@@ -48,6 +51,9 @@ char *SHA512_End(SHA512_CTX *, char *); + char *SHA512_File(const char *, char *); + char *SHA512_FileChunk(const char *, char *, off_t, off_t); + char *SHA512_Data(const void *, unsigned int, char *); +-__END_DECLS ++#ifdef __cplusplus ++} ++#endif ++/* __END_DECLS */ + + #endif /* !_SHA512_H_ */ +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0002-Remove-funopen.patch b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0002-Remove-funopen.patch new file mode 100644 index 000000000..83ce7c8dd --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0002-Remove-funopen.patch @@ -0,0 +1,55 @@ +From 28fc66e8b848709a2e69dba7f07694248e0154e8 Mon Sep 17 00:00:00 2001 +From: Khem Raj <raj.khem@gmail.com> +Date: Sun, 6 Nov 2016 09:40:43 -0800 +Subject: [PATCH 2/3] Remove funopen() + +Musl doesnt have prerequisites for it. 
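For context on the libbsd musl patches above and below: __BEGIN_DECLS and __END_DECLS come from glibc's <sys/cdefs.h>, which musl intentionally does not provide, so patch 0001 open-codes the equivalent C++ linkage guard, and patch 0002 drops funopen() because musl lacks the glibc cookie-I/O machinery it is built on. A minimal sketch of the open-coded guard pattern, using a hypothetical header and function name purely for illustration (not part of the patches):

    /* example.h -- hypothetical header showing the guard that replaces
     * glibc's __BEGIN_DECLS/__END_DECLS in patch 0001 above. */
    #ifndef EXAMPLE_H
    #define EXAMPLE_H

    #include <stddef.h>

    /* __BEGIN_DECLS */
    #ifdef __cplusplus
    extern "C" {
    #endif

    size_t example_strlcpy(char *dst, const char *src, size_t siz);

    /* __END_DECLS */
    #ifdef __cplusplus
    }
    #endif

    #endif /* EXAMPLE_H */

The effect is identical on glibc, but the header now also preprocesses cleanly on libc implementations that do not ship the macros.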
+ +Signed-off-by: Khem Raj <raj.khem@gmail.com> +--- +Upstream-Status: Inappropriate [musl specific] + + man/Makefile.am | 1 - + src/Makefile.am | 1 - + test/Makefile.am | 1 - + 3 files changed, 3 deletions(-) + +diff --git a/man/Makefile.am b/man/Makefile.am +index e4d6e4a..c701d94 100644 +--- a/man/Makefile.am ++++ b/man/Makefile.am +@@ -29,7 +29,6 @@ dist_man_MANS = \ + flopen.3 \ + fmtcheck.3 \ + fparseln.3 \ +- funopen.3bsd \ + getbsize.3 \ + getmode.3 \ + getpeereid.3 \ +diff --git a/src/Makefile.am b/src/Makefile.am +index ad83dbf..13225a3 100644 +--- a/src/Makefile.am ++++ b/src/Makefile.am +@@ -76,7 +76,6 @@ libbsd_la_SOURCES = \ + fmtcheck.c \ + fparseln.c \ + fpurge.c \ +- funopen.c \ + getbsize.c \ + getpeereid.c \ + hash/md5.c \ +diff --git a/test/Makefile.am b/test/Makefile.am +index a75c8ff..e3a1d41 100644 +--- a/test/Makefile.am ++++ b/test/Makefile.am +@@ -36,7 +36,6 @@ check_PROGRAMS = \ + endian \ + humanize \ + fgetln \ +- funopen \ + fparseln \ + fpurge \ + md5 \ +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0003-Fix-build-breaks-due-to-missing-a.out.h.patch b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0003-Fix-build-breaks-due-to-missing-a.out.h.patch new file mode 100644 index 000000000..176d940fc --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd/0003-Fix-build-breaks-due-to-missing-a.out.h.patch @@ -0,0 +1,130 @@ +From a1b93c25311834f2f411e9bfe2e616899ba2122d Mon Sep 17 00:00:00 2001 +From: Khem Raj <raj.khem@gmail.com> +Date: Sun, 6 Nov 2016 10:23:55 -0800 +Subject: [PATCH 3/3] Fix build breaks due to missing a.out.h + +Signed-off-by: Khem Raj <raj.khem@gmail.com> +--- +Upstream-Status: Pending + + include/bsd/nlist.h | 1 - + include/bsd/nlist.h => src/local-aout.h | 47 ++++++++++++++++++++++----------- + src/nlist.c | 9 +++++++ + 3 files changed, 41 insertions(+), 16 deletions(-) + copy include/bsd/nlist.h => src/local-aout.h (63%) + +diff --git a/include/bsd/nlist.h b/include/bsd/nlist.h +index 0389ab7..9c7e3d8 100644 +--- a/include/bsd/nlist.h ++++ b/include/bsd/nlist.h +@@ -28,7 +28,6 @@ + #define LIBBSD_NLIST_H + + #include <sys/cdefs.h> +-#include <a.out.h> + + /* __BEGIN_DECLS */ + #ifdef __cplusplus +diff --git a/include/bsd/nlist.h b/src/local-aout.h +similarity index 63% +copy from include/bsd/nlist.h +copy to src/local-aout.h +index 0389ab7..2adb93e 100644 +--- a/include/bsd/nlist.h ++++ b/src/local-aout.h +@@ -1,5 +1,5 @@ + /* +- * Copyright © 2009 Guillem Jover <guillem@hadrons.org> ++ * Copyright © 2016 Khem Raj <raj.khem@gmail.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions +@@ -24,20 +24,37 @@ + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +-#ifndef LIBBSD_NLIST_H +-#define LIBBSD_NLIST_H ++#ifndef LIBBSD_LOCAL_AOUT_H ++#define LIBBSD_LOCAL_AOUT_H + +-#include <sys/cdefs.h> +-#include <a.out.h> ++#define N_UNDF 0 ++#define N_ABS 2 ++#define N_TEXT 4 ++#define N_DATA 6 ++#define N_BSS 8 ++#define N_FN 15 ++#define N_EXT 1 ++#define N_TYPE 036 ++#define N_STAB 0340 ++#define N_INDR 0xa ++#define N_SETA 0x14 /* Absolute set element symbol. */ ++#define N_SETT 0x16 /* Text set element symbol. */ ++#define N_SETD 0x18 /* Data set element symbol. */ ++#define N_SETB 0x1A /* Bss set element symbol. */ ++#define N_SETV 0x1C /* Pointer to set vector in data area. 
*/ + +-/* __BEGIN_DECLS */ +-#ifdef __cplusplus +-extern "C" { +-#endif +-extern int nlist(const char *filename, struct nlist *list); +-#ifdef __cplusplus +-} +-#endif +-/* __END_DECLS */ ++struct nlist ++{ ++ union ++ { ++ char *n_name; ++ struct nlist *n_next; ++ long n_strx; ++ } n_un; ++ unsigned char n_type; ++ char n_other; ++ short n_desc; ++ unsigned long n_value; ++}; + +-#endif ++#endif /* LIBBSD_LOCAL_AOUT_H */ +diff --git a/src/nlist.c b/src/nlist.c +index 0cffe55..625d310 100644 +--- a/src/nlist.c ++++ b/src/nlist.c +@@ -40,7 +40,11 @@ static char sccsid[] = "@(#)nlist.c 8.1 (Berkeley) 6/4/93"; + + #include <errno.h> + #include <fcntl.h> ++#ifdef __GLIBC__ + #include <a.out.h> ++#else ++#define __NO_A_OUT_SUPPORT ++#endif + #include <stdio.h> + #include <string.h> + #include <unistd.h> +@@ -48,12 +52,17 @@ static char sccsid[] = "@(#)nlist.c 8.1 (Berkeley) 6/4/93"; + #if !defined(__NO_A_OUT_SUPPORT) + #define _NLIST_DO_AOUT + #endif ++ + #define _NLIST_DO_ELF + + #ifdef _NLIST_DO_ELF + #include "local-elf.h" + #endif + ++#ifdef _NLIST_DO_ELF ++#include "local-aout.h" ++#endif ++ + #define SIZE_T_MAX 0xffffffffU + + #ifdef _NLIST_DO_AOUT +-- +2.10.2 + diff --git a/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd_0.8.3.bb b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd_0.8.3.bb index 92121efa8..fadd7886a 100644 --- a/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd_0.8.3.bb +++ b/import-layers/yocto-poky/meta/recipes-support/libbsd/libbsd_0.8.3.bb @@ -36,8 +36,15 @@ SECTION = "libs" SRC_URI = " \ http://libbsd.freedesktop.org/releases/${BPN}-${PV}.tar.xz \ " +SRC_URI_append_libc-musl = " \ + file://0001-Replace-__BEGIN_DECLS-and-__END_DECLS.patch \ + file://0002-Remove-funopen.patch \ + file://0003-Fix-build-breaks-due-to-missing-a.out.h.patch \ +" SRC_URI[md5sum] = "e935c1bb6cc98a4a43cb1da22795493a" SRC_URI[sha256sum] = "934b634f4dfd865b6482650b8f522c70ae65c463529de8be907b53c89c3a34a8" inherit autotools pkgconfig + +BBCLASSEXTEND = "native nativesdk" diff --git a/import-layers/yocto-poky/meta/recipes-support/libgcrypt/files/0001-ecc-Store-EdDSA-session-key-in-secure-memory.patch b/import-layers/yocto-poky/meta/recipes-support/libgcrypt/files/0001-ecc-Store-EdDSA-session-key-in-secure-memory.patch new file mode 100644 index 000000000..f6c4ca76f --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-support/libgcrypt/files/0001-ecc-Store-EdDSA-session-key-in-secure-memory.patch @@ -0,0 +1,39 @@ +CVE: CVE-2017-9526 +Upstream-Status: Backport +Signed-off-by: Ross Burton <ross.burton@intel.com> + +From b3cab278eb9c2ceda79f980bc26460d97f260041 Mon Sep 17 00:00:00 2001 +From: Jo Van Bulck <jo.vanbulck@cs.kuleuven.be> +Date: Thu, 19 Jan 2017 17:00:15 +0100 +Subject: [PATCH] ecc: Store EdDSA session key in secure memory. + +* cipher/ecc-eddsa.c (_gcry_ecc_eddsa_sign): use mpi_snew to allocate +session key. +-- + +An attacker who learns the EdDSA session key from side-channel +observation during the signing process, can easily revover the long- +term secret key. Storing the session key in secure memory ensures that +constant time point operations are used in the MPI library. 
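The whole fix is the one-line switch from mpi_new() to mpi_snew() for the EdDSA nonce r: the internal mpi_snew() is the in-tree counterpart of libgcrypt's public gcry_mpi_snew(), which allocates the MPI in the locked, wiped-on-free secure-memory pool, and MPIs flagged secure are routed through the constant-time point operations. A small sketch of the same distinction through the public API; it is illustrative only, assumes libgcrypt 1.6 or later (for gcry_mpi_get_flag) and linking with -lgcrypt, and the variable names are made up:

    #include <stdio.h>
    #include <gcrypt.h>

    int main(void)
    {
        if (!gcry_check_version(NULL))
            return 1;
        /* Reserve a small secure-memory pool before any allocation. */
        gcry_control(GCRYCTL_INIT_SECMEM, 16384, 0);
        gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);

        gcry_mpi_t normal = gcry_mpi_new(256);   /* ordinary heap          */
        gcry_mpi_t secret = gcry_mpi_snew(256);  /* mlock()ed, wiped later */

        printf("normal in secure memory: %d\n",
               gcry_mpi_get_flag(normal, GCRYMPI_FLAG_SECURE));
        printf("secret in secure memory: %d\n",
               gcry_mpi_get_flag(secret, GCRYMPI_FLAG_SECURE));

        gcry_mpi_release(normal);
        gcry_mpi_release(secret);
        return 0;
    }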
+ +Signed-off-by: Jo Van Bulck <jo.vanbulck@cs.kuleuven.be> +--- + cipher/ecc-eddsa.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/cipher/ecc-eddsa.c b/cipher/ecc-eddsa.c +index f91f8489..813e030d 100644 +--- a/cipher/ecc-eddsa.c ++++ b/cipher/ecc-eddsa.c +@@ -603,7 +603,7 @@ _gcry_ecc_eddsa_sign (gcry_mpi_t input, ECC_secret_key *skey, + a = mpi_snew (0); + x = mpi_new (0); + y = mpi_new (0); +- r = mpi_new (0); ++ r = mpi_snew (0); + ctx = _gcry_mpi_ec_p_internal_new (skey->E.model, skey->E.dialect, 0, + skey->E.p, skey->E.a, skey->E.b); + b = (ctx->nbits+7)/8; +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2017-7526.patch b/import-layers/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2017-7526.patch new file mode 100644 index 000000000..7180e7af2 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-support/libgcrypt/files/CVE-2017-7526.patch @@ -0,0 +1,455 @@ +Flush+reload side-channel attack on RSA secret keys dubbed "Sliding right +into disaster". + +CVE: CVE-2017-7526 +Upstream-Status: Backport +Signed-off-by: Ross Burton <ross.burton@intel.com> + +From 56bd068335500207dea2cece9cc662bcd9658951 Mon Sep 17 00:00:00 2001 +From: NIIBE Yutaka <gniibe@fsij.org> +Date: Tue, 4 Apr 2017 17:38:05 +0900 +Subject: [PATCH 1/5] mpi: Simplify mpi_powm. + +* mpi/mpi-pow.c (_gcry_mpi_powm): Simplify the loop. + +-- + +This fix is not a solution for the problem reported (yet). The +problem is that the current algorithm of _gcry_mpi_powm depends on +exponent and some information leaks is possible. + +Reported-by: Andreas Zankl <andreas.zankl@aisec.fraunhofer.de> +Signed-off-by: NIIBE Yutaka <gniibe@fsij.org> + +(backport from master commit: +719468e53133d3bdf12156c5bfdea2bf15f9f6f1) + +Signed-off-by: Ross Burton <ross.burton@intel.com> +--- + mpi/mpi-pow.c | 105 +++++++++++++++++----------------------------------------- + 1 file changed, 30 insertions(+), 75 deletions(-) + +diff --git a/mpi/mpi-pow.c b/mpi/mpi-pow.c +index a780ebd1..7b3dc318 100644 +--- a/mpi/mpi-pow.c ++++ b/mpi/mpi-pow.c +@@ -609,12 +609,8 @@ _gcry_mpi_powm (gcry_mpi_t res, + if (e == 0) + { + j += c; +- i--; +- if ( i < 0 ) +- { +- c = 0; +- break; +- } ++ if ( --i < 0 ) ++ break; + + e = ep[i]; + c = BITS_PER_MPI_LIMB; +@@ -629,38 +625,33 @@ _gcry_mpi_powm (gcry_mpi_t res, + c -= c0; + j += c0; + ++ e0 = (e >> (BITS_PER_MPI_LIMB - W)); + if (c >= W) +- { +- e0 = (e >> (BITS_PER_MPI_LIMB - W)); +- e = (e << W); +- c -= W; +- } ++ c0 = 0; + else + { +- i--; +- if ( i < 0 ) ++ if ( --i < 0 ) + { +- e = (e >> (BITS_PER_MPI_LIMB - c)); +- break; ++ e0 = (e >> (BITS_PER_MPI_LIMB - c)); ++ j += c - W; ++ goto last_step; ++ } ++ else ++ { ++ c0 = c; ++ e = ep[i]; ++ c = BITS_PER_MPI_LIMB; ++ e0 |= (e >> (BITS_PER_MPI_LIMB - (W - c0))); + } +- +- c0 = c; +- e0 = (e >> (BITS_PER_MPI_LIMB - W)) +- | (ep[i] >> (BITS_PER_MPI_LIMB - W + c0)); +- e = (ep[i] << (W - c0)); +- c = BITS_PER_MPI_LIMB - W + c0; + } + ++ e = e << (W - c0); ++ c -= (W - c0); ++ ++ last_step: + count_trailing_zeros (c0, e0); + e0 = (e0 >> c0) >> 1; + +- for (j += W - c0; j; j--) +- { +- mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx); +- tp = rp; rp = xp; xp = tp; +- rsize = xsize; +- } +- + /* + * base_u <= precomp[e0] + * base_u_size <= precomp_size[e0] +@@ -677,25 +668,23 @@ _gcry_mpi_powm (gcry_mpi_t res, + u.d = precomp[k]; + + mpi_set_cond (&w, &u, k == e0); +- base_u_size |= (precomp_size[k] & ((mpi_size_t)0 - (k == e0)) ); ++ base_u_size |= ( precomp_size[k] & 
((mpi_size_t)0 - (k == e0)) ); + } + +- mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size, +- mp, msize, &karactx); +- tp = rp; rp = xp; xp = tp; +- rsize = xsize; ++ for (j += W - c0; j >= 0; j--) ++ { ++ mul_mod (xp, &xsize, rp, rsize, ++ j == 0 ? base_u : rp, j == 0 ? base_u_size : rsize, ++ mp, msize, &karactx); ++ tp = rp; rp = xp; xp = tp; ++ rsize = xsize; ++ } + + j = c0; ++ if ( i < 0 ) ++ break; + } + +- if (c != 0) +- { +- j += c; +- count_trailing_zeros (c, e); +- e = (e >> c); +- j -= c; +- } +- + while (j--) + { + mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx); +@@ -703,40 +692,6 @@ _gcry_mpi_powm (gcry_mpi_t res, + rsize = xsize; + } + +- if (e != 0) +- { +- /* +- * base_u <= precomp[(e>>1)] +- * base_u_size <= precomp_size[(e>>1)] +- */ +- base_u_size = 0; +- for (k = 0; k < (1<< (W - 1)); k++) +- { +- struct gcry_mpi w, u; +- w.alloced = w.nlimbs = precomp_size[k]; +- u.alloced = u.nlimbs = precomp_size[k]; +- w.sign = u.sign = 0; +- w.flags = u.flags = 0; +- w.d = base_u; +- u.d = precomp[k]; +- +- mpi_set_cond (&w, &u, k == (e>>1)); +- base_u_size |= (precomp_size[k] & ((mpi_size_t)0 - (k == (e>>1))) ); +- } +- +- mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size, +- mp, msize, &karactx); +- tp = rp; rp = xp; xp = tp; +- rsize = xsize; +- +- for (; c; c--) +- { +- mul_mod (xp, &xsize, rp, rsize, rp, rsize, mp, msize, &karactx); +- tp = rp; rp = xp; xp = tp; +- rsize = xsize; +- } +- } +- + /* We shifted MOD, the modulo reduction argument, left + MOD_SHIFT_CNT steps. Adjust the result by reducing it with the + original MOD. +-- +2.11.0 + + +From 6e237c8c48d257dc315e364791d284c6bf3fa703 Mon Sep 17 00:00:00 2001 +From: NIIBE Yutaka <gniibe@fsij.org> +Date: Sat, 24 Jun 2017 20:46:20 +0900 +Subject: [PATCH 2/5] Same computation for square and multiply. + +* mpi/mpi-pow.c (_gcry_mpi_powm): Compare msize for max_u_size. Move +the assignment to base_u into the loop. Copy content refered by RP to +BASE_U except the last of the loop. + +-- + +Signed-off-by: NIIBE Yutaka <gniibe@fsij.org> +(backport from master commit: +78130828e9a140a9de4dafadbc844dbb64cb709a) + +Signed-off-by: Ross Burton <ross.burton@intel.com> +--- + mpi/mpi-pow.c | 50 +++++++++++++++++++++++++++++--------------------- + 1 file changed, 29 insertions(+), 21 deletions(-) + +diff --git a/mpi/mpi-pow.c b/mpi/mpi-pow.c +index 7b3dc318..3cba6903 100644 +--- a/mpi/mpi-pow.c ++++ b/mpi/mpi-pow.c +@@ -573,6 +573,8 @@ _gcry_mpi_powm (gcry_mpi_t res, + MPN_COPY (precomp[i], rp, rsize); + } + ++ if (msize > max_u_size) ++ max_u_size = msize; + base_u = mpi_alloc_limb_space (max_u_size, esec); + MPN_ZERO (base_u, max_u_size); + +@@ -619,6 +621,10 @@ _gcry_mpi_powm (gcry_mpi_t res, + { + int c0; + mpi_limb_t e0; ++ struct gcry_mpi w, u; ++ w.sign = u.sign = 0; ++ w.flags = u.flags = 0; ++ w.d = base_u; + + count_leading_zeros (c0, e); + e = (e << c0); +@@ -652,29 +658,31 @@ _gcry_mpi_powm (gcry_mpi_t res, + count_trailing_zeros (c0, e0); + e0 = (e0 >> c0) >> 1; + +- /* +- * base_u <= precomp[e0] +- * base_u_size <= precomp_size[e0] +- */ +- base_u_size = 0; +- for (k = 0; k < (1<< (W - 1)); k++) +- { +- struct gcry_mpi w, u; +- w.alloced = w.nlimbs = precomp_size[k]; +- u.alloced = u.nlimbs = precomp_size[k]; +- w.sign = u.sign = 0; +- w.flags = u.flags = 0; +- w.d = base_u; +- u.d = precomp[k]; +- +- mpi_set_cond (&w, &u, k == e0); +- base_u_size |= ( precomp_size[k] & ((mpi_size_t)0 - (k == e0)) ); +- } +- + for (j += W - c0; j >= 0; j--) + { +- mul_mod (xp, &xsize, rp, rsize, +- j == 0 ? 
base_u : rp, j == 0 ? base_u_size : rsize, ++ ++ /* ++ * base_u <= precomp[e0] ++ * base_u_size <= precomp_size[e0] ++ */ ++ base_u_size = 0; ++ for (k = 0; k < (1<< (W - 1)); k++) ++ { ++ w.alloced = w.nlimbs = precomp_size[k]; ++ u.alloced = u.nlimbs = precomp_size[k]; ++ u.d = precomp[k]; ++ ++ mpi_set_cond (&w, &u, k == e0); ++ base_u_size |= ( precomp_size[k] & (0UL - (k == e0)) ); ++ } ++ ++ w.alloced = w.nlimbs = rsize; ++ u.alloced = u.nlimbs = rsize; ++ u.d = rp; ++ mpi_set_cond (&w, &u, j != 0); ++ base_u_size ^= ((base_u_size ^ rsize) & (0UL - (j != 0))); ++ ++ mul_mod (xp, &xsize, rp, rsize, base_u, base_u_size, + mp, msize, &karactx); + tp = rp; rp = xp; xp = tp; + rsize = xsize; +-- +2.11.0 + + +From bf059348dafc1b8d29e07b9426d870ead853db84 Mon Sep 17 00:00:00 2001 +From: NIIBE Yutaka <gniibe@fsij.org> +Date: Thu, 29 Jun 2017 11:48:44 +0900 +Subject: [PATCH 3/5] rsa: Add exponent blinding. + +* cipher/rsa.c (secret): Blind secret D with randomized nonce R for +mpi_powm computation. + +-- + +Co-authored-by: Werner Koch <wk@gnupg.org> +Signed-off-by: NIIBE Yutaka <gniibe@fsij.org> + +The paper describing attack: https://eprint.iacr.org/2017/627 + +Sliding right into disaster: Left-to-right sliding windows leak +by Daniel J. Bernstein and Joachim Breitner and Daniel Genkin and +Leon Groot Bruinderink and Nadia Heninger and Tanja Lange and +Christine van Vredendaal and Yuval Yarom + + It is well known that constant-time implementations of modular + exponentiation cannot use sliding windows. However, software + libraries such as Libgcrypt, used by GnuPG, continue to use sliding + windows. It is widely believed that, even if the complete pattern of + squarings and multiplications is observed through a side-channel + attack, the number of exponent bits leaked is not sufficient to + carry out a full key-recovery attack against RSA. Specifically, + 4-bit sliding windows leak only 40% of the bits, and 5-bit sliding + windows leak only 33% of the bits. + + In this paper we demonstrate a complete break of RSA-1024 as + implemented in Libgcrypt. Our attack makes essential use of the fact + that Libgcrypt uses the left-to-right method for computing the + sliding-window expansion. We show for the first time that the + direction of the encoding matters: the pattern of squarings and + multiplications in left-to-right sliding windows leaks significantly + more information about exponent bits than for right-to-left. We show + how to incorporate this additional information into the + Heninger-Shacham algorithm for partial key reconstruction, and use + it to obtain very efficient full key recovery for RSA-1024. We also + provide strong evidence that the same attack works for RSA-2048 with + only moderately more computation. + +Exponent blinding is a kind of workaround to add noise. Signal (leak) +is still there for non-constant-time implementation. 
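The correctness of the blinding added in this hunk rests on Fermat's little theorem: for prime p and gcd(c, p) = 1, c^(p-1) = 1 (mod p), so adding any multiple r*(p-1) to the exponent leaves c^d mod p unchanged while randomizing the exponent bits that mpi_powm actually walks. A small self-contained demonstration with toy numbers (not libgcrypt code; the values are chosen only for illustration):

    /* Toy check that c^(d mod (p-1)) mod p == c^(d mod (p-1) + r*(p-1)) mod p,
     * i.e. exponent blinding does not change the CRT half m1 = c^d mod p.
     * Small integers keep plain 64-bit arithmetic safe; real RSA uses MPIs. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t powm(uint64_t base, uint64_t exp, uint64_t mod)
    {
        uint64_t result = 1;
        base %= mod;
        while (exp) {
            if (exp & 1)
                result = (result * base) % mod;
            base = (base * base) % mod;
            exp >>= 1;
        }
        return result;
    }

    int main(void)
    {
        const uint64_t p = 1009;     /* toy prime standing in for skey->p */
        const uint64_t d = 123457;   /* toy private exponent              */
        const uint64_t c = 42;       /* toy ciphertext                    */
        const uint64_t r = 97;       /* random blinding nonce             */

        uint64_t plain   = powm(c, d % (p - 1), p);       /* old code path */
        uint64_t d_blind = d % (p - 1) + (p - 1) * r;     /* patched path  */
        uint64_t blinded = powm(c, d_blind, p);

        printf("unblinded: %llu, blinded: %llu\n",
               (unsigned long long)plain, (unsigned long long)blinded);
        return 0;
    }

Both printed values are equal; only the sequence of squarings and multiplications observable through the side channel changes from one signature to the next.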
+ +(backported from master commit: +8725c99ffa41778f382ca97233183bcd687bb0ce) + +Signed-off-by: Ross Burton <ross.burton@intel.com> +--- + cipher/rsa.c | 32 +++++++++++++++++++++++++------- + 1 file changed, 25 insertions(+), 7 deletions(-) + +diff --git a/cipher/rsa.c b/cipher/rsa.c +index b6c73741..25e29b5c 100644 +--- a/cipher/rsa.c ++++ b/cipher/rsa.c +@@ -1021,15 +1021,33 @@ secret (gcry_mpi_t output, gcry_mpi_t input, RSA_secret_key *skey ) + gcry_mpi_t m1 = mpi_alloc_secure( mpi_get_nlimbs(skey->n)+1 ); + gcry_mpi_t m2 = mpi_alloc_secure( mpi_get_nlimbs(skey->n)+1 ); + gcry_mpi_t h = mpi_alloc_secure( mpi_get_nlimbs(skey->n)+1 ); +- +- /* m1 = c ^ (d mod (p-1)) mod p */ ++ gcry_mpi_t D_blind = mpi_alloc_secure ( mpi_get_nlimbs(skey->n) + 1 ); ++ gcry_mpi_t r; ++ unsigned int r_nbits; ++ ++ r_nbits = mpi_get_nbits (skey->p) / 4; ++ if (r_nbits < 96) ++ r_nbits = 96; ++ r = mpi_alloc_secure ((r_nbits + BITS_PER_MPI_LIMB-1)/BITS_PER_MPI_LIMB); ++ ++ /* d_blind = (d mod (p-1)) + (p-1) * r */ ++ /* m1 = c ^ d_blind mod p */ ++ _gcry_mpi_randomize (r, r_nbits, GCRY_WEAK_RANDOM); ++ mpi_set_highbit (r, r_nbits - 1); + mpi_sub_ui( h, skey->p, 1 ); +- mpi_fdiv_r( h, skey->d, h ); +- mpi_powm( m1, input, h, skey->p ); +- /* m2 = c ^ (d mod (q-1)) mod q */ ++ mpi_mul ( D_blind, h, r ); ++ mpi_fdiv_r ( h, skey->d, h ); ++ mpi_add ( D_blind, D_blind, h ); ++ mpi_powm( m1, input, D_blind, skey->p ); ++ /* d_blind = (d mod (q-1)) + (q-1) * r */ ++ /* m2 = c ^ d_blind mod q */ ++ _gcry_mpi_randomize (r, r_nbits, GCRY_WEAK_RANDOM); ++ mpi_set_highbit (r, r_nbits - 1); + mpi_sub_ui( h, skey->q, 1 ); +- mpi_fdiv_r( h, skey->d, h ); +- mpi_powm( m2, input, h, skey->q ); ++ mpi_mul ( D_blind, h, r ); ++ mpi_fdiv_r ( h, skey->d, h ); ++ mpi_add ( D_blind, D_blind, h ); ++ mpi_powm( m2, input, D_blind, skey->q ); + /* h = u * ( m2 - m1 ) mod q */ + mpi_sub( h, m2, m1 ); + if ( mpi_has_sign ( h ) ) +-- +2.11.0 + + +From 09b9df2675a24e679b7944352ad6385e9e68474f Mon Sep 17 00:00:00 2001 +From: NIIBE Yutaka <gniibe@fsij.org> +Date: Thu, 29 Jun 2017 12:36:27 +0900 +Subject: [PATCH 4/5] rsa: Fix exponent blinding. + +* cipher/rsa.c (secret): Free D_BLIND. + +-- + +Fixes-commit: a9f612def801c8145d551d995475e5d51a4c988c +Signed-off-by: NIIBE Yutaka <gniibe@fsij.org> +Signed-off-by: Ross Burton <ross.burton@intel.com> +--- + cipher/rsa.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/cipher/rsa.c b/cipher/rsa.c +index 25e29b5c..33f92ebd 100644 +--- a/cipher/rsa.c ++++ b/cipher/rsa.c +@@ -1057,6 +1057,7 @@ secret (gcry_mpi_t output, gcry_mpi_t input, RSA_secret_key *skey ) + mpi_mul ( h, h, skey->p ); + mpi_add ( output, m1, h ); + ++ mpi_free ( D_blind ); + mpi_free ( h ); + mpi_free ( m1 ); + mpi_free ( m2 ); +-- +2.11.0 + + +From 1323fdcf6f2f9fd1da8c5adf396650f15a2a1260 Mon Sep 17 00:00:00 2001 +From: NIIBE Yutaka <gniibe@fsij.org> +Date: Thu, 29 Jun 2017 12:40:19 +0900 +Subject: [PATCH 5/5] rsa: More fix. + +* cipher/rsa.c (secret): Free R. 
+ +-- + +Fixes-commit: a9f612def801c8145d551d995475e5d51a4c988c +Signed-off-by: NIIBE Yutaka <gniibe@fsij.org> +Signed-off-by: Ross Burton <ross.burton@intel.com> +--- + cipher/rsa.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/cipher/rsa.c b/cipher/rsa.c +index 33f92ebd..8d8d157b 100644 +--- a/cipher/rsa.c ++++ b/cipher/rsa.c +@@ -1057,6 +1057,7 @@ secret (gcry_mpi_t output, gcry_mpi_t input, RSA_secret_key *skey ) + mpi_mul ( h, h, skey->p ); + mpi_add ( output, m1, h ); + ++ mpi_free ( r ); + mpi_free ( D_blind ); + mpi_free ( h ); + mpi_free ( m1 ); +-- +2.11.0 + diff --git a/import-layers/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt.inc b/import-layers/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt.inc index 15805cd43..00870e3d2 100644 --- a/import-layers/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt.inc +++ b/import-layers/yocto-poky/meta/recipes-support/libgcrypt/libgcrypt.inc @@ -20,6 +20,8 @@ SRC_URI = "${GNUPG_MIRROR}/libgcrypt/libgcrypt-${PV}.tar.gz \ file://libgcrypt-fix-building-error-with-O2-in-sysroot-path.patch \ file://fix-ICE-failure-on-mips-with-option-O-and-g.patch \ file://fix-undefined-reference-to-pthread.patch \ + file://0001-ecc-Store-EdDSA-session-key-in-secure-memory.patch \ + file://CVE-2017-7526.patch \ " BINCONFIG = "${bindir}/libgcrypt-config" diff --git a/import-layers/yocto-poky/meta/recipes-support/libpcre/libpcre_8.39.bb b/import-layers/yocto-poky/meta/recipes-support/libpcre/libpcre_8.39.bb index 2d4668b52..90dfb3355 100644 --- a/import-layers/yocto-poky/meta/recipes-support/libpcre/libpcre_8.39.bb +++ b/import-layers/yocto-poky/meta/recipes-support/libpcre/libpcre_8.39.bb @@ -7,7 +7,9 @@ HOMEPAGE = "http://www.pcre.org" SECTION = "devel" LICENSE = "BSD" LIC_FILES_CHKSUM = "file://LICENCE;md5=b8221cbf43c5587f90ccf228f1185cc2" -SRC_URI = "ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-${PV}.tar.bz2 \ + + +SRC_URI = "${SOURCEFORGE_MIRROR}/projects/pcre/files/prce/8.39/pcre-${PV}.tar.bz2 \ file://pcre-cross.patch \ file://fix-pcre-name-collision.patch \ file://run-ptest \ diff --git a/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt/0001-Check-for-integer-overflow-in-xsltAddTextString.patch b/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt/0001-Check-for-integer-overflow-in-xsltAddTextString.patch new file mode 100644 index 000000000..57aaacc58 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt/0001-Check-for-integer-overflow-in-xsltAddTextString.patch @@ -0,0 +1,80 @@ +From 08ab2774b870de1c7b5a48693df75e8154addae5 Mon Sep 17 00:00:00 2001 +From: Nick Wellnhofer <wellnhofer@aevum.de> +Date: Thu, 12 Jan 2017 15:39:52 +0100 +Subject: [PATCH] Check for integer overflow in xsltAddTextString + +Limit buffer size in xsltAddTextString to INT_MAX. The issue can be +exploited to trigger an out of bounds write on 64-bit systems. 
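The hunk below follows a standard overflow-safe growth policy: reject any append where used + len + 1 would exceed INT_MAX, then grow the buffer by at least the requested minimum (and at least 100 bytes), clamping the new size at INT_MAX instead of letting the addition wrap. A standalone restatement of that policy; the function and parameter names are illustrative, this is not the libxslt code itself:

    #include <limits.h>
    #include <stdio.h>

    /* Returns the new capacity, or -1 if the request cannot be satisfied. */
    static int grow_capacity(int capacity, int used, int add_len)
    {
        int min_size, extra;

        if (add_len >= INT_MAX - used)     /* used + add_len + 1 would overflow */
            return -1;
        min_size = used + add_len + 1;     /* account for the NUL terminator    */

        if (capacity >= min_size)          /* already big enough                */
            return capacity;

        extra = min_size < 100 ? 100 : min_size;  /* grow by at least 100 bytes */
        if (extra > INT_MAX - capacity)           /* clamp instead of wrapping  */
            return INT_MAX;
        return capacity + extra;
    }

    int main(void)
    {
        printf("%d\n", grow_capacity(0, 0, 10));            /* small append */
        printf("%d\n", grow_capacity(1024, 1000, 50));      /* regrow       */
        printf("%d\n", grow_capacity(1024, 1000, INT_MAX)); /* rejected: -1 */
        return 0;
    }

Without the INT_MAX check, a sufficiently long text node makes the int size computation wrap on 64-bit systems, so xmlRealloc returns a buffer far smaller than the subsequent copy, which is the out-of-bounds write described above.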
+ +Originally reported to Chromium: + +https://crbug.com/676623 + +CVE: CVE-2017-5029 +Upstream-Status: Backport + +Signed-off-by: Fan Xin <fan.xin@jp.fujitus.com> + +--- + libxslt/transform.c | 25 ++++++++++++++++++++++--- + libxslt/xsltInternals.h | 4 ++-- + 2 files changed, 24 insertions(+), 5 deletions(-) + +diff --git a/libxslt/transform.c b/libxslt/transform.c +index 519133f..02bff34 100644 +--- a/libxslt/transform.c ++++ b/libxslt/transform.c +@@ -813,13 +813,32 @@ xsltAddTextString(xsltTransformContextPtr ctxt, xmlNodePtr target, + return(target); + + if (ctxt->lasttext == target->content) { ++ int minSize; + +- if (ctxt->lasttuse + len >= ctxt->lasttsize) { ++ /* Check for integer overflow accounting for NUL terminator. */ ++ if (len >= INT_MAX - ctxt->lasttuse) { ++ xsltTransformError(ctxt, NULL, target, ++ "xsltCopyText: text allocation failed\n"); ++ return(NULL); ++ } ++ minSize = ctxt->lasttuse + len + 1; ++ ++ if (ctxt->lasttsize < minSize) { + xmlChar *newbuf; + int size; ++ int extra; ++ ++ /* Double buffer size but increase by at least 100 bytes. */ ++ extra = minSize < 100 ? 100 : minSize; ++ ++ /* Check for integer overflow. */ ++ if (extra > INT_MAX - ctxt->lasttsize) { ++ size = INT_MAX; ++ } ++ else { ++ size = ctxt->lasttsize + extra; ++ } + +- size = ctxt->lasttsize + len + 100; +- size *= 2; + newbuf = (xmlChar *) xmlRealloc(target->content,size); + if (newbuf == NULL) { + xsltTransformError(ctxt, NULL, target, +diff --git a/libxslt/xsltInternals.h b/libxslt/xsltInternals.h +index 060b178..5ad1771 100644 +--- a/libxslt/xsltInternals.h ++++ b/libxslt/xsltInternals.h +@@ -1754,8 +1754,8 @@ struct _xsltTransformContext { + * Speed optimization when coalescing text nodes + */ + const xmlChar *lasttext; /* last text node content */ +- unsigned int lasttsize; /* last text node size */ +- unsigned int lasttuse; /* last text node use */ ++ int lasttsize; /* last text node size */ ++ int lasttuse; /* last text node use */ + /* + * Per Context Debugging + */ +-- +1.9.1 + diff --git a/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt/0001-Link-libraries-with-libm.patch b/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt/0001-Link-libraries-with-libm.patch new file mode 100644 index 000000000..16ffeba10 --- /dev/null +++ b/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt/0001-Link-libraries-with-libm.patch @@ -0,0 +1,48 @@ +From 487e2f7e35dad3deec7978ce4478a3d4ea5070e7 Mon Sep 17 00:00:00 2001 +From: Jussi Kukkonen <jussi.kukkonen@intel.com> +Date: Fri, 10 Feb 2017 14:26:59 +0200 +Subject: [PATCH] Link libraries with libm + +Otherwise linking the resulting libraries to a binary (e.g. 
xsltproc) +fails when using gold linker: +| ../libxslt/.libs/libxslt.so: error: undefined reference to 'fmod' +| ../libxslt/.libs/libxslt.so: error: undefined reference to 'pow' +| ../libexslt/.libs/libexslt.so: error: undefined reference to 'floor' +| collect2: error: ld returned 1 exit status + +Upstream-Status: Submitted [mailing list, Feb 10 2017] +Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com> +--- + libexslt/Makefile.am | 2 +- + libxslt/Makefile.am | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/libexslt/Makefile.am b/libexslt/Makefile.am +index 1cf5138..5449524 100644 +--- a/libexslt/Makefile.am ++++ b/libexslt/Makefile.am +@@ -27,7 +27,7 @@ libexslt_la_SOURCES = \ + libexslt.h \ + dynamic.c + +-libexslt_la_LIBADD = $(top_builddir)/libxslt/libxslt.la $(EXTRA_LIBS) $(LIBGCRYPT_LIBS) ++libexslt_la_LIBADD = $(top_builddir)/libxslt/libxslt.la $(EXTRA_LIBS) $(LIBGCRYPT_LIBS) $(M_LIBS) + libexslt_la_LDFLAGS = $(WIN32_EXTRA_LDFLAGS) -version-info $(LIBEXSLT_VERSION_INFO) + + man_MANS = libexslt.3 +diff --git a/libxslt/Makefile.am b/libxslt/Makefile.am +index d9fed68..9d44c3d 100644 +--- a/libxslt/Makefile.am ++++ b/libxslt/Makefile.am +@@ -62,7 +62,7 @@ else + LIBXSLT_VERSION_SCRIPT = + endif + +-libxslt_la_LIBADD = $(LIBXML_LIBS) $(EXTRA_LIBS) ++libxslt_la_LIBADD = $(LIBXML_LIBS) $(M_LIBS) $(EXTRA_LIBS) + libxslt_la_LDFLAGS = \ + $(WIN32_EXTRA_LDFLAGS) \ + $(LIBXSLT_VERSION_SCRIPT) \ +-- +2.1.4 + diff --git a/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.29.bb b/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.29.bb index be747e608..d27c70660 100644 --- a/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.29.bb +++ b/import-layers/yocto-poky/meta/recipes-support/libxslt/libxslt_1.1.29.bb @@ -11,6 +11,8 @@ DEPENDS = "libxml2" SRC_URI = "ftp://xmlsoft.org/libxslt/libxslt-${PV}.tar.gz \ file://pkgconfig_fix.patch \ file://0001-Use-pkg-config-to-find-gcrypt-and-libxml2.patch \ + file://0001-Link-libraries-with-libm.patch \ + file://0001-Check-for-integer-overflow-in-xsltAddTextString.patch \ " SRC_URI[md5sum] = "a129d3c44c022de3b9dcf6d6f288d72e" diff --git a/import-layers/yocto-poky/meta/recipes-support/nspr/nspr/nspr.pc.in b/import-layers/yocto-poky/meta/recipes-support/nspr/nspr/nspr.pc.in index c37d0bcbd..d74caabe4 100644 --- a/import-layers/yocto-poky/meta/recipes-support/nspr/nspr/nspr.pc.in +++ b/import-layers/yocto-poky/meta/recipes-support/nspr/nspr/nspr.pc.in @@ -6,6 +6,6 @@ includedir=OEINCDIR Name: NSPR Description: The Netscape Portable Runtime -Version: 4.9.5 +Version: NSPRVERSION Libs: -L${libdir} -lplds4 -lplc4 -lnspr4 -lpthread -ldl Cflags: diff --git a/import-layers/yocto-poky/meta/recipes-support/nspr/nspr_4.12.bb b/import-layers/yocto-poky/meta/recipes-support/nspr/nspr_4.12.bb index fa6d52357..9345a51f3 100644 --- a/import-layers/yocto-poky/meta/recipes-support/nspr/nspr_4.12.bb +++ b/import-layers/yocto-poky/meta/recipes-support/nspr/nspr_4.12.bb @@ -160,10 +160,13 @@ do_compile_append() { do_install_append() { install -D ${WORKDIR}/nspr.pc.in ${D}${libdir}/pkgconfig/nspr.pc - sed -i s:OEPREFIX:${prefix}:g ${D}${libdir}/pkgconfig/nspr.pc - sed -i s:OELIBDIR:${libdir}:g ${D}${libdir}/pkgconfig/nspr.pc - sed -i s:OEINCDIR:${includedir}:g ${D}${libdir}/pkgconfig/nspr.pc - sed -i s:OEEXECPREFIX:${exec_prefix}:g ${D}${libdir}/pkgconfig/nspr.pc + sed -i \ + -e 's:NSPRVERSION:${PV}:g' \ + -e 's:OEPREFIX:${prefix}:g' \ + -e 's:OELIBDIR:${libdir}:g' \ + -e 's:OEINCDIR:${includedir}:g' \ + -e 
's:OEEXECPREFIX:${exec_prefix}:g' \ + ${D}${libdir}/pkgconfig/nspr.pc mkdir -p ${D}${libdir}/nspr/tests install -m 0755 ${S}/pr/tests/runtests.pl ${D}${libdir}/nspr/tests diff --git a/import-layers/yocto-poky/meta/recipes-support/rng-tools/rng-tools_5.bb b/import-layers/yocto-poky/meta/recipes-support/rng-tools/rng-tools_5.bb index 913a092fd..9329e8ad3 100644 --- a/import-layers/yocto-poky/meta/recipes-support/rng-tools/rng-tools_5.bb +++ b/import-layers/yocto-poky/meta/recipes-support/rng-tools/rng-tools_5.bb @@ -2,7 +2,7 @@ SUMMARY = "Random number generator daemon" LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=0b6f033afe6db235e559456585dc8cdc" -SRC_URI = "http://heanet.dl.sourceforge.net/sourceforge/gkernel/${BP}.tar.gz \ +SRC_URI = "${SOURCEFORGE_MIRROR}/gkernel/${BP}.tar.gz \ file://0001-If-the-libc-is-lacking-argp-use-libargp.patch \ file://0002-Add-argument-to-control-the-libargp-dependency.patch \ file://underquote.patch \ diff --git a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py index 2906cc66d..71a71f7af 100755 --- a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py +++ b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py @@ -269,7 +269,7 @@ if __name__ == "__main__": "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " ) m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core ${PN}-lang", - "importlib" ) + "importlib imp.*" ) m.addPackage( "${PN}-gdbm", "Python GNU database support", "${PN}-core", "lib-dynload/_gdbm.*.so" ) diff --git a/import-layers/yocto-poky/scripts/lib/bsp/kernel.py b/import-layers/yocto-poky/scripts/lib/bsp/kernel.py index a3ee325a8..32cab3b64 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/kernel.py +++ b/import-layers/yocto-poky/scripts/lib/bsp/kernel.py @@ -331,6 +331,7 @@ def write_patch_items(scripts_path, machine, patch_items): patch list [${machine}-user-patches.scc]. 
""" f = open_user_file(scripts_path, machine, machine+"-user-patches.scc", "w") + f.write("mark patching start\n") for item in patch_items: f.write("patch " + item + "\n") f.close() diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend index 0a47a4e37..f4fc21930 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend index 815c77b81..4ec3e480d 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend index 5fb45d9ab..51cb0124c 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend index 2d3d073f7..1d7886576 100644 --- 
a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1"
\ No newline at end of file diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend index b88a06c52..1725bf4bc 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend index 7c0df8bcd..15d6431e2 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend index 0a47a4e37..f4fc21930 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend index 815c77b81..4ec3e480d 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend @@ -28,6 +28,6 @@ SRC_URI += 
"file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend index 5fb45d9ab..51cb0124c 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend index aecdff025..8d7e24e6d 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend index dd4de311a..16a24f0cd 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend 
b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend index 137d8fa51..9a97de960 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend index 0a47a4e37..f4fc21930 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend index 815c77b81..4ec3e480d 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend index 5fb45d9ab..51cb0124c 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate 
changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend index 1e99a04dc..f4efb753a 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend index b88a06c52..1725bf4bc 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend index 7c0df8bcd..15d6431e2 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend index 0a47a4e37..f4fc21930 100644 --- 
a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend index 815c77b81..4ec3e480d 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend index 5fb45d9ab..51cb0124c 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend index 01a046c75..7d0f7df6b 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" 
-SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend index 57c90fa3a..4d89f5ca4 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend index accf9d5d8..17d1ea6eb 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend index 0a47a4e37..f4fc21930 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend index 815c77b81..4ec3e480d 100644 --- 
a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend index 5fb45d9ab..51cb0124c 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend index 1e99a04dc..f4efb753a 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend index b88a06c52..1725bf4bc 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" 
-SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend index 7c0df8bcd..15d6431e2 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend index 14ee16f32..fe22d3e15 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend @@ -57,6 +57,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend index e256e08bd..828afe557 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend @@ -57,6 +57,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend index c1635d6f6..df65fd02f 100644 --- 
a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend @@ -57,6 +57,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend index fce67b4aa..689578840 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -56,6 +56,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend index 409793228..3549de58e 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -56,6 +56,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend index e8c3fc82e..4176e5ac4 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -56,6 +56,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= 
"${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend index 0a47a4e37..f4fc21930 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend index 815c77b81..4ec3e480d 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend index 5fb45d9ab..51cb0124c 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend @@ -28,6 +28,6 @@ SRC_URI += "file://{{=machine}}-tiny.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend index 1e99a04dc..f4efb753a 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend 
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.1" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend index b88a06c52..1725bf4bc 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.4" diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend index 7c0df8bcd..15d6431e2 100644 --- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend +++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend @@ -27,6 +27,6 @@ SRC_URI += "file://{{=machine}}-standard.scc \ # replace these SRCREVs with the real commit ids once you've had # the appropriate changes committed to the upstream linux-yocto repo -SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" -SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" +#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}" #LINUX_VERSION = "4.8" diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py index e41d81a31..82a2be122 100644 --- a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py +++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py @@ -512,7 +512,7 @@ class PythonRecipeHandler(RecipeHandler): except (OSError, subprocess.CalledProcessError): pass else: - for line in dep_output.decode('utf-8').splitlines(): + for line in dep_output.splitlines(): line = line.rstrip() dep, filename = line.split('\t', 1) if filename.endswith('/setup.py'): @@ -591,7 +591,7 @@ class PythonRecipeHandler(RecipeHandler): if 'stderr' not in popenargs: popenargs['stderr'] = subprocess.STDOUT try: - return subprocess.check_output(cmd, **popenargs) + return subprocess.check_output(cmd, **popenargs).decode('utf-8') except OSError as exc: logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc) raise 
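The long run of .bbappend hunks above only comments out the SRCREV_machine_pn-* and SRCREV_meta_pn-* "${AUTOREV}" defaults in the yocto-bsp substrate templates, so freshly generated BSP appends no longer float on AUTOREV until real commit ids are filled in. The create_buildsys_python.py hunk just above is a bytes-versus-str fix: subprocess.check_output() returns bytes, and the decode is moved into the shared command wrapper so callers iterate over text. The following is a minimal, self-contained sketch of that pattern; the helper name run_command and the echo-based demo are illustrative, not the actual recipetool code.

import subprocess

def run_command(cmd, **popenargs):
    """Run cmd and return its combined stdout/stderr as text (UTF-8)."""
    if 'stderr' not in popenargs:
        popenargs['stderr'] = subprocess.STDOUT  # merge stderr, as the patch does
    # check_output() returns bytes; decoding here means callers no longer
    # need their own .decode('utf-8') before splitting lines.
    return subprocess.check_output(cmd, **popenargs).decode('utf-8')

if __name__ == '__main__':
    # Hypothetical tab-separated "dependency<TAB>file" output, similar in shape
    # to what the recipetool handler consumes after the change.
    for line in run_command(['echo', 'six\t/tmp/example/setup.py']).splitlines():
        dep, filename = line.rstrip().split('\t', 1)
        print(dep, filename)

Decoding once at this boundary keeps the bytes/str handling in a single place, which is the same reasoning the hunk applies.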
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks index b90002356..4bd9d6a65 100644 --- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks +++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/systemd-bootdisk.wks @@ -4,7 +4,7 @@ part /boot --source bootimg-efi --sourceparams="loader=systemd-boot" --ondisk sda --label msdos --active --align 1024 -part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 +part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid part swap --ondisk sda --size 44 --label swap1 --fstype=swap diff --git a/import-layers/yocto-poky/scripts/lib/wic/filemap.py b/import-layers/yocto-poky/scripts/lib/wic/filemap.py index f3240ba8d..162603ed0 100644 --- a/import-layers/yocto-poky/scripts/lib/wic/filemap.py +++ b/import-layers/yocto-poky/scripts/lib/wic/filemap.py @@ -543,9 +543,9 @@ def sparse_copy(src_fname, dst_fname, offset=0, skip=0): end = (last + 1) * fmap.block_size if start < skip < end: - start = skip - - fmap._f_image.seek(start, os.SEEK_SET) + fmap._f_image.seek(skip, os.SEEK_SET) + else: + fmap._f_image.seek(start, os.SEEK_SET) dst_file.seek(offset + start, os.SEEK_SET) chunk_size = 1024 * 1024 diff --git a/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py b/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py index edf5e5d22..4c547e04a 100644 --- a/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py +++ b/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py @@ -108,7 +108,7 @@ class DirectImageCreator(BaseImageCreator): if pnum == num: if part.no_table: return 0 - if self.ptable_format == 'msdos' and realnum > 3: + if self.ptable_format == 'msdos' and realnum > 3 and len(parts) > 4: # account for logical partition numbering, ex. sda5.. return realnum + 1 return realnum diff --git a/import-layers/yocto-poky/scripts/lib/wic/partition.py b/import-layers/yocto-poky/scripts/lib/wic/partition.py index 90f65a1e3..ec3aa6622 100644 --- a/import-layers/yocto-poky/scripts/lib/wic/partition.py +++ b/import-layers/yocto-poky/scripts/lib/wic/partition.py @@ -146,6 +146,12 @@ class Partition(): oe_builddir, bootimg_dir, kernel_dir, rootfs_dir, native_sysroot) + # further processing required Partition.size to be an integer, make + # sure that it is one + if type(self.size) is not int: + msger.error("Partition %s internal size is not an integer. " \ + "This a bug in source plugin %s and needs to be fixed." 
\ + % (self.mountpoint, self.source)) def prepare_rootfs_from_fs_image(self, cr_workdir, oe_builddir, rootfs_dir): @@ -157,7 +163,7 @@ class Partition(): out = exec_cmd(du_cmd) rootfs_size = out.split()[0] - self.size = rootfs_size + self.size = int(rootfs_size) self.source_file = rootfs def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir, @@ -184,6 +190,10 @@ class Partition(): if os.path.isfile(rootfs): os.remove(rootfs) + if not self.fstype: + msger.error("File system for partition %s not specified in kickstart, " \ + "use --fstype option" % (self.mountpoint)) + for prefix in ("ext", "btrfs", "vfat", "squashfs"): if self.fstype.startswith(prefix): method = getattr(self, "prepare_rootfs_" + prefix) @@ -194,7 +204,7 @@ class Partition(): # get the rootfs size in the right units for kickstart (kB) du_cmd = "du -Lbks %s" % rootfs out = exec_cmd(du_cmd) - self.size = out.split()[0] + self.size = int(out.split()[0]) break @@ -229,6 +239,9 @@ class Partition(): (self.fstype, extra_imagecmd, rootfs, label_str, rootfs_dir) exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo) + mkfs_cmd = "fsck.%s -pvfD %s || [ $? -le 3 ]" % (self.fstype, rootfs) + exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo) + def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo): """ @@ -375,7 +388,7 @@ class Partition(): out = exec_cmd(du_cmd) fs_size = out.split()[0] - self.size = fs_size + self.size = int(fs_size) def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot): """ diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py index 8bc362254..4adb80bec 100644 --- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py +++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py @@ -234,5 +234,5 @@ class BootimgEFIPlugin(SourcePlugin): out = exec_cmd(du_cmd) bootimg_size = out.split()[0] - part.size = bootimg_size + part.size = int(bootimg_size) part.source_file = bootimg diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py index e0b11f95a..618dd4475 100644 --- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py +++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py @@ -71,16 +71,16 @@ class RawCopyPlugin(SourcePlugin): dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno)) if 'skip' in source_params: - sparse_copy(src, dst, skip=source_params['skip']) + sparse_copy(src, dst, skip=int(source_params['skip'])) else: sparse_copy(src, dst) # get the size in the right units for kickstart (kB) du_cmd = "du -Lbks %s" % dst out = exec_cmd(du_cmd) - filesize = out.split()[0] + filesize = int(out.split()[0]) - if int(filesize) > int(part.size): + if filesize > part.size: part.size = filesize part.source_file = dst diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py b/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py index cb03009fc..9ea4a30cb 100644 --- a/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py +++ b/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py @@ -92,7 +92,7 @@ class Image(): def add_partition(self, size, disk_name, mountpoint, source_file=None, fstype=None, label=None, fsopts=None, boot=False, align=None, no_table=False, part_type=None, uuid=None, system_id=None): - """ Add the next 
partition. Prtitions have to be added in the + """ Add the next partition. Partitions have to be added in the first-to-last order. """ ks_pnum = len(self.partitions) @@ -201,9 +201,10 @@ class Image(): part['num'] = 0 if disk['ptable_format'] == "msdos": - if disk['realpart'] > 3: - part['type'] = 'logical' - part['num'] = disk['realpart'] + 1 + if len(self.partitions) > 4: + if disk['realpart'] > 3: + part['type'] = 'logical' + part['num'] = disk['realpart'] + 1 disk['partitions'].append(num) msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d " @@ -292,7 +293,7 @@ class Image(): # even number of sectors. if part['mountpoint'] == "/boot" and part['fstype'] in ["vfat", "msdos"] \ and part['size'] % 2: - msger.debug("Substracting one sector from '%s' partition to " \ + msger.debug("Subtracting one sector from '%s' partition to " \ "get even number of sectors for the partition" % \ part['mountpoint']) part['size'] -= 1 diff --git a/import-layers/yocto-poky/scripts/oe-pkgdata-util b/import-layers/yocto-poky/scripts/oe-pkgdata-util index bb917b4fc..dbffd6a36 100755 --- a/import-layers/yocto-poky/scripts/oe-pkgdata-util +++ b/import-layers/yocto-poky/scripts/oe-pkgdata-util @@ -526,7 +526,7 @@ def main(): parser_lookup_recipe.set_defaults(func=lookup_recipe) parser_package_info = subparsers.add_parser('package-info', - help='Shows version, recipe and size information for one or more packages', + help='Show version, recipe and size information for one or more packages', description='Looks up the specified runtime package(s) and display information') parser_package_info.add_argument('pkg', nargs='*', help='Runtime package name to look up') parser_package_info.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)') diff --git a/import-layers/yocto-poky/scripts/oe-setup-builddir b/import-layers/yocto-poky/scripts/oe-setup-builddir index 93722e08a..e53f73c9c 100755 --- a/import-layers/yocto-poky/scripts/oe-setup-builddir +++ b/import-layers/yocto-poky/scripts/oe-setup-builddir @@ -61,7 +61,7 @@ if [ -n "$TEMPLATECONF" ]; then TEMPLATECONF="$OEROOT/$TEMPLATECONF" fi if [ ! 
-d "$TEMPLATECONF" ]; then - echo >&2 "Error: '$TEMPLATECONF' must be a directory containing local.conf & bblayers.conf" + echo >&2 "Error: TEMPLATECONF value points to nonexistent directory '$TEMPLATECONF'" exit 1 fi fi diff --git a/import-layers/yocto-poky/scripts/runqemu b/import-layers/yocto-poky/scripts/runqemu index dbe17abfc..6748cb258 100755 --- a/import-layers/yocto-poky/scripts/runqemu +++ b/import-layers/yocto-poky/scripts/runqemu @@ -167,6 +167,7 @@ class BaseConfig(object): self.kernel = '' self.kernel_cmdline = '' self.kernel_cmdline_script = '' + self.bootparams = '' self.dtb = '' self.fstype = '' self.kvm_enabled = False @@ -368,7 +369,7 @@ class BaseConfig(object): elif arg.startswith('qemuparams='): self.qemu_opt_script += ' %s' % arg[len('qemuparams='):] elif arg.startswith('bootparams='): - self.kernel_cmdline_script += ' %s' % arg[len('bootparams='):] + self.bootparams = arg[len('bootparams='):] elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)): self.check_arg_path(os.path.abspath(arg)) elif re.search('-image-', arg): @@ -676,17 +677,35 @@ class BaseConfig(object): else: self.nfs_server = '192.168.7.1' - nfs_instance = int(self.nfs_instance) - - mountd_rpcport = 21111 + nfs_instance - nfsd_rpcport = 11111 + nfs_instance - nfsd_port = 3049 + 2 * nfs_instance - mountd_port = 3048 + 2 * nfs_instance - unfs_opts="nfsvers=3,port=%s,mountprog=%s,nfsprog=%s,udp,mountport=%s" % (nfsd_port, mountd_rpcport, nfsd_rpcport, mountd_port) - self.unfs_opts = unfs_opts + # Figure out a new nfs_instance to allow multiple qemus running. + # CentOS 7.1's ps doesn't print full command line without "ww" + # when invoke by subprocess.Popen(). + cmd = "ps auxww" + ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8') + pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) ' + all_instances = re.findall(pattern, ps, re.M) + if all_instances: + all_instances.sort(key=int) + self.nfs_instance = int(all_instances.pop()) + 1 + + mountd_rpcport = 21111 + self.nfs_instance + nfsd_rpcport = 11111 + self.nfs_instance + nfsd_port = 3049 + 2 * self.nfs_instance + mountd_port = 3048 + 2 * self.nfs_instance + + # Export vars for runqemu-export-rootfs + export_dict = { + 'NFS_INSTANCE': self.nfs_instance, + 'MOUNTD_RPCPORT': mountd_rpcport, + 'NFSD_RPCPORT': nfsd_rpcport, + 'NFSD_PORT': nfsd_port, + 'MOUNTD_PORT': mountd_port, + } + for k, v in export_dict.items(): + # Use '%s' since they are integers + os.putenv(k, '%s' % v) - p = '%s/.runqemu-sdk/pseudo' % os.getenv('HOME') - os.putenv('PSEUDO_LOCALSTATEDIR', p) + self.unfs_opts="nfsvers=3,port=%s,mountprog=%s,nfsprog=%s,udp,mountport=%s" % (nfsd_port, mountd_rpcport, nfsd_rpcport, mountd_port) # Extract .tar.bz2 or .tar.bz if no self.nfs_dir if not self.nfs_dir: @@ -714,7 +733,7 @@ class BaseConfig(object): self.nfs_dir = dest # Start the userspace NFS server - cmd = 'runqemu-export-rootfs restart %s' % self.nfs_dir + cmd = 'runqemu-export-rootfs start %s' % self.nfs_dir logger.info('Running %s...' % cmd) if subprocess.call(cmd, shell=True) != 0: raise Exception('Failed to run %s' % cmd) @@ -723,6 +742,8 @@ class BaseConfig(object): def setup_slirp(self): + """Setup user networking""" + if self.fstype == 'nfs': self.setup_nfs() self.kernel_cmdline_script += ' ip=dhcp' @@ -790,14 +811,13 @@ class BaseConfig(object): logger.error("Failed to setup tap device. 
Run runqemu-gen-tapdevs to manually create.") return 1 self.tap = tap - n0 = tap[3:] - n1 = int(n0) * 2 + 1 - n2 = n1 + 1 - self.nfs_instance = n0 + tapnum = int(tap[3:]) + gateway = tapnum * 2 + 1 + client = gateway + 1 if self.fstype == 'nfs': self.setup_nfs() - self.kernel_cmdline_script += " ip=192.168.7.%s::192.168.7.%s:255.255.255.0" % (n2, n1) - mac = "52:54:00:12:34:%02x" % n2 + self.kernel_cmdline_script += " ip=192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway) + mac = "52:54:00:12:34:%02x" % client qb_tap_opt = self.get('QB_TAP_OPT') if qb_tap_opt: qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap).replace('@MAC@', mac) @@ -840,11 +860,11 @@ class BaseConfig(object): vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \ % (self.rootfs, rootfs_format) elif subprocess.call(cmd2, shell=True) == 0: - logger.info('Using scsi drive') + logger.info('Using ide drive') vm_drive = "%s,format=%s" % (self.rootfs, rootfs_format) else: logger.warn("Can't detect drive type %s" % self.rootfs) - logger.warn('Tring to use virtio block drive') + logger.warn('Trying to use virtio block drive') vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format) self.rootfs_options = '%s -no-reboot' % vm_drive self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT')) @@ -935,7 +955,9 @@ class BaseConfig(object): def start_qemu(self): if self.kernel: - kernel_opts = "-kernel %s -append '%s %s %s'" % (self.kernel, self.kernel_cmdline, self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND')) + kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline, + self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'), + self.bootparams) if self.dtb: kernel_opts += " -dtb %s" % self.dtb else: diff --git a/import-layers/yocto-poky/scripts/runqemu-export-rootfs b/import-layers/yocto-poky/scripts/runqemu-export-rootfs index 3dee13116..7ebc07194 100755 --- a/import-layers/yocto-poky/scripts/runqemu-export-rootfs +++ b/import-layers/yocto-poky/scripts/runqemu-export-rootfs @@ -78,23 +78,17 @@ if [ ! -d "$PSEUDO_LOCALSTATEDIR" ]; then fi # rpc.mountd RPC port -NFS_MOUNTPROG=$[ 21111 + $NFS_INSTANCE ] +MOUNTD_RPCPORT=${MOUNTD_RPCPORT:=$[ 21111 + $NFS_INSTANCE ]} # rpc.nfsd RPC port -NFS_NFSPROG=$[ 11111 + $NFS_INSTANCE ] -# NFS port number -NFS_PORT=$[ 3049 + 2 * $NFS_INSTANCE ] +NFSD_RPCPORT=${NFSD_RPCPORT:=$[ 11111 + $NFS_INSTANCE ]} +# NFS server port number +NFSD_PORT=${NFSD_PORT:=$[ 3049 + 2 * $NFS_INSTANCE ]} # mountd port number -MOUNT_PORT=$[ 3048 + 2 * $NFS_INSTANCE ] +MOUNTD_PORT=${MOUNTD_PORT:=$[ 3048 + 2 * $NFS_INSTANCE ]} ## For debugging you would additionally add ## --debug all -UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -x $NFS_NFSPROG -n $NFS_PORT -y $NFS_MOUNTPROG -m $MOUNT_PORT" - -# Setup the exports file -if [ "$1" = "start" ]; then - echo "Creating exports file..." - echo "$NFS_EXPORT_DIR (rw,async,no_root_squash,no_all_squash,insecure)" > $EXPORTS -fi +UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -x $NFSD_RPCPORT -n $NFSD_PORT -y $MOUNTD_RPCPORT -m $MOUNTD_PORT" # See how we were called. case "$1" in @@ -114,6 +108,9 @@ case "$1" in exit 1 fi + echo "Creating exports file..." 
+ echo "$NFS_EXPORT_DIR (rw,no_root_squash,no_all_squash,insecure)" > $EXPORTS + echo "Starting User Mode nfsd" echo " $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/bin/unfsd $UNFSD_OPTS" $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/bin/unfsd $UNFSD_OPTS diff --git a/import-layers/yocto-poky/scripts/send-pull-request b/import-layers/yocto-poky/scripts/send-pull-request index 575549db3..883deacb0 100755 --- a/import-layers/yocto-poky/scripts/send-pull-request +++ b/import-layers/yocto-poky/scripts/send-pull-request @@ -158,11 +158,16 @@ GIT_EXTRA_CC=$(for R in $EXTRA_CC; do echo -n "--cc='$R' "; done) unset IFS # Handoff to git-send-email. It will perform the send confirmation. +# Mail threading was already handled by git-format-patch in +# create-pull-request, so we must not allow git-send-email to +# add In-Reply-To and References headers again. PATCHES=$(echo $PDIR/*.patch) if [ $AUTO_CL -eq 1 ]; then # Send the cover letter to every recipient, both specified as well as # harvested. Then remove it from the patches list. - eval "git send-email $GIT_TO $GIT_CC $GIT_EXTRA_CC --confirm=always --no-chain-reply-to --suppress-cc=all $CL" + # --no-thread is redundant here (only sending a single message) and + # merely added for the sake of consistency. + eval "git send-email $GIT_TO $GIT_CC $GIT_EXTRA_CC --confirm=always --no-thread --suppress-cc=all $CL" if [ $? -eq 1 ]; then echo "ERROR: failed to send cover-letter with automatic recipients." exit 1 @@ -172,7 +177,7 @@ fi # Send the patch to the specified recipients and, if -c was specified, those git # finds in this specific patch. -eval "git send-email $GIT_TO $GIT_EXTRA_CC --confirm=always --no-chain-reply-to $GITSOBCC $PATCHES" +eval "git send-email $GIT_TO $GIT_EXTRA_CC --confirm=always --no-thread $GITSOBCC $PATCHES" if [ $? -eq 1 ]; then echo "ERROR: failed to send patches." 
exit 1 diff --git a/import-layers/yocto-poky/scripts/sstate-sysroot-cruft.sh b/import-layers/yocto-poky/scripts/sstate-sysroot-cruft.sh index b7ed8ea84..b6166aa1b 100755 --- a/import-layers/yocto-poky/scripts/sstate-sysroot-cruft.sh +++ b/import-layers/yocto-poky/scripts/sstate-sysroot-cruft.sh @@ -141,6 +141,18 @@ WHITELIST="${WHITELIST} \ .*/var/cache/fontconfig/ \ " +# created by oe.utils.write_ld_so_conf which is used from few bbclasses and recipes: +# meta/classes/image-prelink.bbclass: oe.utils.write_ld_so_conf(d) +# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d) +# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d) +# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d) +# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d) +# introduced in oe-core commit 7fd1d7e639c2ed7e0699937a5cb245c187b7c811 +# and more visible since added to gobject-introspection in 10e0c1a3a452baa05d160a92a54b2e33cf0fd061 +WHITELIST="${WHITELIST} \ + [^/]*/etc/ld.so.conf \ +" + SYSROOTS="`readlink -f ${tmpdir}`/sysroots/" mkdir ${OUTPUT} diff --git a/import-layers/yocto-poky/scripts/test-dependencies.sh b/import-layers/yocto-poky/scripts/test-dependencies.sh index 00c50e0d6..0b94de860 100755 --- a/import-layers/yocto-poky/scripts/test-dependencies.sh +++ b/import-layers/yocto-poky/scripts/test-dependencies.sh @@ -141,7 +141,7 @@ build_all() { bitbake -k $targets 2>&1 | tee -a ${OUTPUT1}/complete.log RESULT+=${PIPESTATUS[0]} grep "ERROR: Task.*failed" ${OUTPUT1}/complete.log > ${OUTPUT1}/failed-tasks.log - cat ${OUTPUT1}/failed-tasks.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb;.*@@g' | sort -u > ${OUTPUT1}/failed-recipes.log + cat ${OUTPUT1}/failed-tasks.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb:.*@@g' | sort -u > ${OUTPUT1}/failed-recipes.log } build_every_recipe() { @@ -178,7 +178,7 @@ build_every_recipe() { RESULT+=${RECIPE_RESULT} mv ${OUTPUTB}/${recipe}.log ${OUTPUTB}/failed/ grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | tee -a ${OUTPUTB}/failed-tasks.log - grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb;.*@@g' >> ${OUTPUTB}/failed-recipes.log + grep "ERROR: Task.*failed" ${OUTPUTB}/failed/${recipe}.log | sed 's@.*/@@g; s@_.*@@g; s@\.bb, .*@@g; s@\.bb:.*@@g' >> ${OUTPUTB}/failed-recipes.log # and append also ${recipe} in case the failed task was from some dependency echo ${recipe} >> ${OUTPUTB}/failed-recipes.log else |
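The last two hunks are script housekeeping: sstate-sysroot-cruft.sh whitelists the per-sysroot etc/ld.so.conf files written by oe.utils.write_ld_so_conf(), and test-dependencies.sh corrects the sed chain that reduces "ERROR: Task ... failed" lines to bare recipe names (the separator after the .bb file in newer bitbake output is a colon, recipe.bb:do_task, which the previous "\.bb;" pattern could not match). Below is a small Python sketch of that normalisation; the regex steps mirror the sed expression and the sample log lines are invented for illustration.

import re

# The same four substitutions as the script's sed chain, applied in order.
_STEPS = [
    r'.*/',       # drop everything up to the last '/' of the recipe path
    r'_.*',       # drop the version (first '_' onwards)
    r'\.bb, .*',  # older "recipe.bb, do_task) failed ..." error format
    r'\.bb:.*',   # newer "recipe.bb:do_task) failed ..." format (the fixed case)
]

def failed_recipe(error_line):
    """Reduce a bitbake 'ERROR: Task ... failed' line to a recipe name."""
    for pattern in _STEPS:
        error_line = re.sub(pattern, '', error_line)
    return error_line

if __name__ == '__main__':
    samples = [
        "ERROR: Task (/poky/meta/recipes-core/busybox/busybox_1.24.1.bb:do_compile) failed with exit code '1'",
        "ERROR: Task (/poky/meta/recipes-devtools/gcc/gcc-runtime.bb:do_configure) failed with exit code '1'",
    ]
    for line in samples:
        print(failed_recipe(line))  # prints: busybox, then gcc-runtime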