author    Ingo Molnar <mingo@elte.hu>    2009-06-17 12:52:15 +0200
committer Ingo Molnar <mingo@elte.hu>    2009-06-17 12:56:49 +0200
commit    eadb8a091b27a840de7450f84ecff5ef13476424 (patch)
tree      58c3782d40def63baa8167f3d31e3048cb4c7660 /kernel
parent    73874005cd8800440be4299bd095387fff4b90ac (diff)
parent    65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/traps.c
	arch/x86/power/cpu.c
	arch/x86/power/cpu_32.c
	kernel/Makefile

Semantic conflict:
	arch/x86/kernel/hw_breakpoint.c

Merge reason: Resolve the conflicts, move from put_cpu_no_sched() to
put_cpu() in arch/x86/kernel/hw_breakpoint.c.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
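
The semantic-conflict resolution above replaces put_cpu_no_sched() with plain put_cpu() in arch/x86/kernel/hw_breakpoint.c. A minimal, illustrative sketch of that pairing (the function name and body are hypothetical, not the actual hw_breakpoint.c code):

#include <linux/smp.h>

/* Illustrative only: pin the current CPU, touch its per-CPU state, unpin. */
static void sample_per_cpu_update(void)
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU's id */

	/* ... program per-CPU debug-register state for 'cpu' ... */

	put_cpu();		/* re-enables preemption; a pending reschedule may run here */
}
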
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/async.c | 13
-rw-r--r--  kernel/audit_tree.c | 6
-rw-r--r--  kernel/cgroup.c | 3
-rw-r--r--  kernel/compat.c | 11
-rw-r--r--  kernel/cpuset.c | 260
-rw-r--r--  kernel/cred.c | 4
-rw-r--r--  kernel/exit.c | 17
-rw-r--r--  kernel/fork.c | 38
-rw-r--r--  kernel/futex.c | 1208
-rw-r--r--  kernel/groups.c | 288
-rw-r--r--  kernel/hrtimer.c | 58
-rw-r--r--  kernel/irq/Makefile | 2
-rw-r--r--  kernel/irq/chip.c | 12
-rw-r--r--  kernel/irq/handle.c | 69
-rw-r--r--  kernel/irq/internals.h | 5
-rw-r--r--  kernel/irq/manage.c | 17
-rw-r--r--  kernel/irq/migration.c | 14
-rw-r--r--  kernel/irq/numa_migrate.c | 38
-rw-r--r--  kernel/kallsyms.c | 134
-rw-r--r--  kernel/kexec.c | 16
-rw-r--r--  kernel/kfifo.c | 4
-rw-r--r--  kernel/kmod.c | 4
-rw-r--r--  kernel/kthread.c | 2
-rw-r--r--  kernel/lockdep_internals.h | 4
-rw-r--r--  kernel/module.c | 66
-rw-r--r--  kernel/mutex.c | 31
-rw-r--r--  kernel/panic.c | 35
-rw-r--r--  kernel/params.c | 46
-rw-r--r--  kernel/perf_counter.c | 4339
-rw-r--r--  kernel/power/Kconfig | 4
-rw-r--r--  kernel/power/Makefile | 5
-rw-r--r--  kernel/power/hibernate.c (renamed from kernel/power/disk.c) | 59
-rw-r--r--  kernel/power/hibernate_nvs.c | 135
-rw-r--r--  kernel/power/main.c | 526
-rw-r--r--  kernel/power/power.h | 25
-rw-r--r--  kernel/power/poweroff.c | 2
-rw-r--r--  kernel/power/process.c | 5
-rw-r--r--  kernel/power/snapshot.c | 80
-rw-r--r--  kernel/power/suspend.c | 300
-rw-r--r--  kernel/power/suspend_test.c | 187
-rw-r--r--  kernel/power/swsusp.c | 198
-rw-r--r--  kernel/printk.c | 33
-rw-r--r--  kernel/profile.c | 14
-rw-r--r--  kernel/ptrace.c | 11
-rw-r--r--  kernel/rcupreempt.c | 8
-rw-r--r--  kernel/rcutree.c | 25
-rw-r--r--  kernel/rcutree_trace.c | 64
-rw-r--r--  kernel/rtmutex.c | 250
-rw-r--r--  kernel/rtmutex_common.h | 8
-rw-r--r--  kernel/sched.c | 401
-rw-r--r--  kernel/sched_clock.c | 3
-rw-r--r--  kernel/sched_cpupri.c | 8
-rw-r--r--  kernel/sched_fair.c | 13
-rw-r--r--  kernel/sched_idletask.c | 3
-rw-r--r--  kernel/sched_rt.c | 2
-rw-r--r--  kernel/signal.c | 78
-rw-r--r--  kernel/slow-work.c | 27
-rw-r--r--  kernel/smp.c | 2
-rw-r--r--  kernel/softirq.c | 13
-rw-r--r--  kernel/sys.c | 290
-rw-r--r--  kernel/sys_ni.c | 3
-rw-r--r--  kernel/sysctl.c | 71
-rw-r--r--  kernel/time/clockevents.c | 14
-rw-r--r--  kernel/time/clocksource.c | 23
-rw-r--r--  kernel/time/tick-broadcast.c | 2
-rw-r--r--  kernel/time/tick-oneshot.c | 17
-rw-r--r--  kernel/time/tick-sched.c | 7
-rw-r--r--  kernel/time/timekeeping.c | 9
-rw-r--r--  kernel/timer.c | 141
-rw-r--r--  kernel/trace/Kconfig | 70
-rw-r--r--  kernel/trace/Makefile | 5
-rw-r--r--  kernel/trace/blktrace.c | 91
-rw-r--r--  kernel/trace/ftrace.c | 79
-rw-r--r--  kernel/trace/ring_buffer.c | 114
-rw-r--r--  kernel/trace/trace.c | 26
-rw-r--r--  kernel/trace/trace_events.c | 4
-rw-r--r--  kernel/trace/trace_events_filter.c | 6
-rw-r--r--  kernel/trace/trace_functions_graph.c | 6
-rw-r--r--  kernel/trace/trace_output.c | 85
-rw-r--r--  kernel/trace/trace_output.h | 4
-rw-r--r--  kernel/trace/trace_stack.c | 2
-rw-r--r--  kernel/trace/trace_sysprof.c | 3
-rw-r--r--  kernel/user.c | 67
-rw-r--r--  kernel/wait.c | 2
85 files changed, 8179 insertions, 2197 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 18ad1110b226..f88decb1b445 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -11,6 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
async.o
+obj-y += groups.o
ifdef CONFIG_FUNCTION_TRACER
# Do not trace debug files and internal ftrace files
@@ -97,6 +98,7 @@ obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/async.c b/kernel/async.c
index 968ef9457d4e..27235f5de198 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -92,19 +92,18 @@ extern int initcall_debug;
static async_cookie_t __lowest_in_progress(struct list_head *running)
{
struct async_entry *entry;
+
if (!list_empty(running)) {
entry = list_first_entry(running,
struct async_entry, list);
return entry->cookie;
- } else if (!list_empty(&async_pending)) {
- entry = list_first_entry(&async_pending,
- struct async_entry, list);
- return entry->cookie;
- } else {
- /* nothing in progress... next_cookie is "infinity" */
- return next_cookie;
}
+ list_for_each_entry(entry, &async_pending, list)
+ if (entry->running == running)
+ return entry->cookie;
+
+ return next_cookie; /* "infinity" value */
}
static async_cookie_t lowest_in_progress(struct list_head *running)
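
The __lowest_in_progress() change above makes the helper domain-aware: when nothing from the given running list has started yet, it scans async_pending for entries bound to that list rather than returning the cookie at the head of the global pending list. A short usage sketch of why that matters for per-domain synchronization, assuming this kernel's async domain API (async_schedule_domain() / async_synchronize_full_domain()); the my_probe_* names are hypothetical:

#include <linux/async.h>

static LIST_HEAD(my_probe_domain);

static void my_probe_one(void *data, async_cookie_t cookie)
{
	/* ... slow, independently schedulable probe work ... */
}

static void my_probe_all(void)
{
	async_schedule_domain(my_probe_one, NULL, &my_probe_domain);

	/*
	 * Must keep waiting even while the entry is still on async_pending;
	 * the pending-list scan added above is what keeps this from
	 * returning before my_probe_one() has run.
	 */
	async_synchronize_full_domain(&my_probe_domain);
}
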
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 6e7351739a82..1f6396d76687 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -568,7 +568,7 @@ void audit_trim_trees(void)
if (err)
goto skip_it;
- root_mnt = collect_mounts(path.mnt, path.dentry);
+ root_mnt = collect_mounts(&path);
path_put(&path);
if (!root_mnt)
goto skip_it;
@@ -660,7 +660,7 @@ int audit_add_tree_rule(struct audit_krule *rule)
err = kern_path(tree->pathname, 0, &path);
if (err)
goto Err;
- mnt = collect_mounts(path.mnt, path.dentry);
+ mnt = collect_mounts(&path);
path_put(&path);
if (!mnt) {
err = -ENOMEM;
@@ -720,7 +720,7 @@ int audit_tag_tree(char *old, char *new)
err = kern_path(new, 0, &path);
if (err)
return err;
- tagged = collect_mounts(path.mnt, path.dentry);
+ tagged = collect_mounts(&path);
path_put(&path);
if (!tagged)
return -ENOMEM;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a7267bfd3765..3fb789f6df94 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -46,6 +46,7 @@
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>
+#include <linux/smp_lock.h>
#include <asm/atomic.h>
@@ -900,6 +901,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
struct cgroup *cgrp = &root->top_cgroup;
struct cgroup_sb_opts opts;
+ lock_kernel();
mutex_lock(&cgrp->dentry->d_inode->i_mutex);
mutex_lock(&cgroup_mutex);
@@ -927,6 +929,7 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
kfree(opts.release_agent);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
+ unlock_kernel();
return ret;
}
diff --git a/kernel/compat.c b/kernel/compat.c
index 42d56544460f..f6c204f07ea6 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -882,6 +882,17 @@ compat_sys_rt_sigtimedwait (compat_sigset_t __user *uthese,
}
+asmlinkage long
+compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo)
+{
+ siginfo_t info;
+
+ if (copy_siginfo_from_user32(&info, uinfo))
+ return -EFAULT;
+ return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
+}
+
#ifdef __ARCH_WANT_COMPAT_SYS_TIME
/* compat_time_t is a 32 bit "long" and needs to get converted. */
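
The new compat wrapper above converts the 32-bit siginfo with copy_siginfo_from_user32() and hands it to do_rt_tgsigqueueinfo(), the helper behind the native rt_tgsigqueueinfo syscall. A hedged userspace sketch of issuing the call (the tgsigqueue() helper is hypothetical, and it assumes kernel headers new enough to define __NR_rt_tgsigqueueinfo):

#define _GNU_SOURCE
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Queue a fully populated siginfo to thread 'tid' inside thread group 'tgid'. */
static int tgsigqueue(pid_t tgid, pid_t tid, int sig, int value)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));
	info.si_signo = sig;
	info.si_code = SI_QUEUE;	/* negative si_code, as required for user-generated siginfo */
	info.si_pid = getpid();
	info.si_uid = getuid();
	info.si_value.sival_int = value;

	return syscall(__NR_rt_tgsigqueueinfo, tgid, tid, sig, &info);
}
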
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 026faccca869..7e75a41bd508 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -97,12 +97,6 @@ struct cpuset {
struct cpuset *parent; /* my parent */
- /*
- * Copy of global cpuset_mems_generation as of the most
- * recent time this cpuset changed its mems_allowed.
- */
- int mems_generation;
-
struct fmeter fmeter; /* memory_pressure filter */
/* partition number for rebuild_sched_domains() */
@@ -176,27 +170,6 @@ static inline int is_spread_slab(const struct cpuset *cs)
return test_bit(CS_SPREAD_SLAB, &cs->flags);
}
-/*
- * Increment this integer everytime any cpuset changes its
- * mems_allowed value. Users of cpusets can track this generation
- * number, and avoid having to lock and reload mems_allowed unless
- * the cpuset they're using changes generation.
- *
- * A single, global generation is needed because cpuset_attach_task() could
- * reattach a task to a different cpuset, which must not have its
- * generation numbers aliased with those of that tasks previous cpuset.
- *
- * Generations are needed for mems_allowed because one task cannot
- * modify another's memory placement. So we must enable every task,
- * on every visit to __alloc_pages(), to efficiently check whether
- * its current->cpuset->mems_allowed has changed, requiring an update
- * of its current->mems_allowed.
- *
- * Since writes to cpuset_mems_generation are guarded by the cgroup lock
- * there is no need to mark it atomic.
- */
-static int cpuset_mems_generation;
-
static struct cpuset top_cpuset = {
.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
};
@@ -228,8 +201,9 @@ static struct cpuset top_cpuset = {
* If a task is only holding callback_mutex, then it has read-only
* access to cpusets.
*
- * The task_struct fields mems_allowed and mems_generation may only
- * be accessed in the context of that task, so require no locks.
+ * Now, the task_struct fields mems_allowed and mempolicy may be changed
+ * by another task, so we use alloc_lock in the task_struct to protect
+ * them.
*
* The cpuset_common_file_read() handlers only hold callback_mutex across
* small pieces of code, such as when reading out possibly multi-word
@@ -331,75 +305,22 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
}
-/**
- * cpuset_update_task_memory_state - update task memory placement
- *
- * If the current tasks cpusets mems_allowed changed behind our
- * backs, update current->mems_allowed, mems_generation and task NUMA
- * mempolicy to the new value.
- *
- * Task mempolicy is updated by rebinding it relative to the
- * current->cpuset if a task has its memory placement changed.
- * Do not call this routine if in_interrupt().
- *
- * Call without callback_mutex or task_lock() held. May be
- * called with or without cgroup_mutex held. Thanks in part to
- * 'the_top_cpuset_hack', the task's cpuset pointer will never
- * be NULL. This routine also might acquire callback_mutex during
- * call.
- *
- * Reading current->cpuset->mems_generation doesn't need task_lock
- * to guard the current->cpuset derefence, because it is guarded
- * from concurrent freeing of current->cpuset using RCU.
- *
- * The rcu_dereference() is technically probably not needed,
- * as I don't actually mind if I see a new cpuset pointer but
- * an old value of mems_generation. However this really only
- * matters on alpha systems using cpusets heavily. If I dropped
- * that rcu_dereference(), it would save them a memory barrier.
- * For all other arch's, rcu_dereference is a no-op anyway, and for
- * alpha systems not using cpusets, another planned optimization,
- * avoiding the rcu critical section for tasks in the root cpuset
- * which is statically allocated, so can't vanish, will make this
- * irrelevant. Better to use RCU as intended, than to engage in
- * some cute trick to save a memory barrier that is impossible to
- * test, for alpha systems using cpusets heavily, which might not
- * even exist.
- *
- * This routine is needed to update the per-task mems_allowed data,
- * within the tasks context, when it is trying to allocate memory
- * (in various mm/mempolicy.c routines) and notices that some other
- * task has been modifying its cpuset.
+/*
+ * update task's spread flag if cpuset's page/slab spread flag is set
+ *
+ * Called with callback_mutex/cgroup_mutex held
*/
-
-void cpuset_update_task_memory_state(void)
+static void cpuset_update_task_spread_flag(struct cpuset *cs,
+ struct task_struct *tsk)
{
- int my_cpusets_mem_gen;
- struct task_struct *tsk = current;
- struct cpuset *cs;
-
- rcu_read_lock();
- my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
- rcu_read_unlock();
-
- if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
- mutex_lock(&callback_mutex);
- task_lock(tsk);
- cs = task_cs(tsk); /* Maybe changed when task not locked */
- guarantee_online_mems(cs, &tsk->mems_allowed);
- tsk->cpuset_mems_generation = cs->mems_generation;
- if (is_spread_page(cs))
- tsk->flags |= PF_SPREAD_PAGE;
- else
- tsk->flags &= ~PF_SPREAD_PAGE;
- if (is_spread_slab(cs))
- tsk->flags |= PF_SPREAD_SLAB;
- else
- tsk->flags &= ~PF_SPREAD_SLAB;
- task_unlock(tsk);
- mutex_unlock(&callback_mutex);
- mpol_rebind_task(tsk, &tsk->mems_allowed);
- }
+ if (is_spread_page(cs))
+ tsk->flags |= PF_SPREAD_PAGE;
+ else
+ tsk->flags &= ~PF_SPREAD_PAGE;
+ if (is_spread_slab(cs))
+ tsk->flags |= PF_SPREAD_SLAB;
+ else
+ tsk->flags &= ~PF_SPREAD_SLAB;
}
/*
@@ -1007,14 +928,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* other task, the task_struct mems_allowed that we are hacking
* is for our current task, which must allocate new pages for that
* migrating memory region.
- *
- * We call cpuset_update_task_memory_state() before hacking
- * our tasks mems_allowed, so that we are assured of being in
- * sync with our tasks cpuset, and in particular, callbacks to
- * cpuset_update_task_memory_state() from nested page allocations
- * won't see any mismatch of our cpuset and task mems_generation
- * values, so won't overwrite our hacked tasks mems_allowed
- * nodemask.
*/
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
@@ -1022,22 +935,37 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
{
struct task_struct *tsk = current;
- cpuset_update_task_memory_state();
-
- mutex_lock(&callback_mutex);
tsk->mems_allowed = *to;
- mutex_unlock(&callback_mutex);
do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
- mutex_lock(&callback_mutex);
guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
- mutex_unlock(&callback_mutex);
}
/*
- * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new
- * nodes if memory_migrate flag is set. Called with cgroup_mutex held.
+ * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
+ * @tsk: the task to change
+ * @newmems: new nodes that the task will be set
+ *
+ * In order to avoid seeing no nodes if the old and new nodes are disjoint,
+ * we structure updates as setting all new allowed nodes, then clearing newly
+ * disallowed ones.
+ *
+ * Called with task's alloc_lock held
+ */
+static void cpuset_change_task_nodemask(struct task_struct *tsk,
+ nodemask_t *newmems)
+{
+ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+ mpol_rebind_task(tsk, &tsk->mems_allowed);
+ mpol_rebind_task(tsk, newmems);
+ tsk->mems_allowed = *newmems;
+}
+
+/*
+ * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
+ * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
+ * memory_migrate flag is set. Called with cgroup_mutex held.
*/
static void cpuset_change_nodemask(struct task_struct *p,
struct cgroup_scanner *scan)
@@ -1046,12 +974,19 @@ static void cpuset_change_nodemask(struct task_struct *p,
struct cpuset *cs;
int migrate;
const nodemask_t *oldmem = scan->data;
+ nodemask_t newmems;
+
+ cs = cgroup_cs(scan->cg);
+ guarantee_online_mems(cs, &newmems);
+
+ task_lock(p);
+ cpuset_change_task_nodemask(p, &newmems);
+ task_unlock(p);
mm = get_task_mm(p);
if (!mm)
return;
- cs = cgroup_cs(scan->cg);
migrate = is_memory_migrate(cs);
mpol_rebind_mm(mm, &cs->mems_allowed);
@@ -1104,10 +1039,10 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
/*
* Handle user request to change the 'mems' memory placement
* of a cpuset. Needs to validate the request, update the
- * cpusets mems_allowed and mems_generation, and for each
- * task in the cpuset, rebind any vma mempolicies and if
- * the cpuset is marked 'memory_migrate', migrate the tasks
- * pages to the new memory.
+ * cpusets mems_allowed, and for each task in the cpuset,
+ * update mems_allowed and rebind task's mempolicy and any vma
+ * mempolicies and if the cpuset is marked 'memory_migrate',
+ * migrate the tasks pages to the new memory.
*
* Call with cgroup_mutex held. May take callback_mutex during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
@@ -1160,7 +1095,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
mutex_lock(&callback_mutex);
cs->mems_allowed = trialcs->mems_allowed;
- cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
update_tasks_nodemask(cs, &oldmem, &heap);
@@ -1193,6 +1127,46 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
}
/*
+ * cpuset_change_flag - make a task's spread flags the same as its cpuset's
+ * @tsk: task to be updated
+ * @scan: struct cgroup_scanner containing the cgroup of the task
+ *
+ * Called by cgroup_scan_tasks() for each task in a cgroup.
+ *
+ * We don't need to re-check for the cgroup/cpuset membership, since we're
+ * holding cgroup_lock() at this point.
+ */
+static void cpuset_change_flag(struct task_struct *tsk,
+ struct cgroup_scanner *scan)
+{
+ cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+}
+
+/*
+ * update_tasks_flags - update the spread flags of tasks in the cpuset.
+ * @cs: the cpuset in which each task's spread flags needs to be changed
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ *
+ * Called with cgroup_mutex held
+ *
+ * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * calling callback functions for each.
+ *
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
+ */
+static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
+{
+ struct cgroup_scanner scan;
+
+ scan.cg = cs->css.cgroup;
+ scan.test_task = NULL;
+ scan.process_task = cpuset_change_flag;
+ scan.heap = heap;
+ cgroup_scan_tasks(&scan);
+}
+
+/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (see cpuset_flagbits_t)
* cs: the cpuset to update
@@ -1205,8 +1179,10 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
int turning_on)
{
struct cpuset *trialcs;
- int err;
int balance_flag_changed;
+ int spread_flag_changed;
+ struct ptr_heap heap;
+ int err;
trialcs = alloc_trial_cpuset(cs);
if (!trialcs)
@@ -1221,9 +1197,16 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
if (err < 0)
goto out;
+ err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+ if (err < 0)
+ goto out;
+
balance_flag_changed = (is_sched_load_balance(cs) !=
is_sched_load_balance(trialcs));
+ spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
+ || (is_spread_page(cs) != is_spread_page(trialcs)));
+
mutex_lock(&callback_mutex);
cs->flags = trialcs->flags;
mutex_unlock(&callback_mutex);
@@ -1231,6 +1214,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
async_rebuild_sched_domains();
+ if (spread_flag_changed)
+ update_tasks_flags(cs, &heap);
+ heap_free(&heap);
out:
free_trial_cpuset(trialcs);
return err;
@@ -1372,15 +1358,20 @@ static void cpuset_attach(struct cgroup_subsys *ss,
if (cs == &top_cpuset) {
cpumask_copy(cpus_attach, cpu_possible_mask);
+ to = node_possible_map;
} else {
- mutex_lock(&callback_mutex);
guarantee_online_cpus(cs, cpus_attach);
- mutex_unlock(&callback_mutex);
+ guarantee_online_mems(cs, &to);
}
err = set_cpus_allowed_ptr(tsk, cpus_attach);
if (err)
return;
+ task_lock(tsk);
+ cpuset_change_task_nodemask(tsk, &to);
+ task_unlock(tsk);
+ cpuset_update_task_spread_flag(cs, tsk);
+
from = oldcs->mems_allowed;
to = cs->mems_allowed;
mm = get_task_mm(tsk);
@@ -1442,11 +1433,9 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
break;
case FILE_SPREAD_PAGE:
retval = update_flag(CS_SPREAD_PAGE, cs, val);
- cs->mems_generation = cpuset_mems_generation++;
break;
case FILE_SPREAD_SLAB:
retval = update_flag(CS_SPREAD_SLAB, cs, val);
- cs->mems_generation = cpuset_mems_generation++;
break;
default:
retval = -EINVAL;
@@ -1786,8 +1775,6 @@ static struct cgroup_subsys_state *cpuset_create(
struct cpuset *parent;
if (!cont->parent) {
- /* This is early initialization for the top cgroup */
- top_cpuset.mems_generation = cpuset_mems_generation++;
return &top_cpuset.css;
}
parent = cgroup_cs(cont->parent);
@@ -1799,7 +1786,6 @@ static struct cgroup_subsys_state *cpuset_create(
return ERR_PTR(-ENOMEM);
}
- cpuset_update_task_memory_state();
cs->flags = 0;
if (is_spread_page(parent))
set_bit(CS_SPREAD_PAGE, &cs->flags);
@@ -1808,7 +1794,6 @@ static struct cgroup_subsys_state *cpuset_create(
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cpumask_clear(cs->cpus_allowed);
nodes_clear(cs->mems_allowed);
- cs->mems_generation = cpuset_mems_generation++;
fmeter_init(&cs->fmeter);
cs->relax_domain_level = -1;
@@ -1827,8 +1812,6 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct cpuset *cs = cgroup_cs(cont);
- cpuset_update_task_memory_state();
-
if (is_sched_load_balance(cs))
update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
@@ -1849,21 +1832,6 @@ struct cgroup_subsys cpuset_subsys = {
.early_init = 1,
};
-/*
- * cpuset_init_early - just enough so that the calls to
- * cpuset_update_task_memory_state() in early init code
- * are harmless.
- */
-
-int __init cpuset_init_early(void)
-{
- alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
-
- top_cpuset.mems_generation = cpuset_mems_generation++;
- return 0;
-}
-
-
/**
* cpuset_init - initialize cpusets at system boot
*
@@ -1874,11 +1842,13 @@ int __init cpuset_init(void)
{
int err = 0;
+ if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
+ BUG();
+
cpumask_setall(top_cpuset.cpus_allowed);
nodes_setall(top_cpuset.mems_allowed);
fmeter_init(&top_cpuset.fmeter);
- top_cpuset.mems_generation = cpuset_mems_generation++;
set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
top_cpuset.relax_domain_level = -1;
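
For the cpuset_change_task_nodemask() helper added above, the ordering described in its comment (set all newly allowed nodes first, then drop the newly disallowed ones) is the whole point. A standalone model in plain C (not kernel code; a bitmask stands in for nodemask_t) showing that the task's mems_allowed is never observed empty even when the old and new node sets are disjoint:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int mems_allowed = 0x3;	/* old nodes {0,1} */
	const unsigned int newmems = 0xc;	/* new nodes {2,3} */

	mems_allowed |= newmems;		/* step 1 (nodes_or): {0,1,2,3}, never empty */
	assert(mems_allowed != 0);

	mems_allowed = newmems;			/* step 2 (final assignment): {2,3} */
	assert(mems_allowed != 0);

	printf("final mems_allowed = 0x%x\n", mems_allowed);
	return 0;
}
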
diff --git a/kernel/cred.c b/kernel/cred.c
index 3a039189d707..1bb4d7e5d616 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -167,7 +167,7 @@ EXPORT_SYMBOL(prepare_creds);
/*
* Prepare credentials for current to perform an execve()
- * - The caller must hold current->cred_exec_mutex
+ * - The caller must hold current->cred_guard_mutex
*/
struct cred *prepare_exec_creds(void)
{
@@ -276,7 +276,7 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
struct cred *new;
int ret;
- mutex_init(&p->cred_exec_mutex);
+ mutex_init(&p->cred_guard_mutex);
if (
#ifdef CONFIG_KEYS
diff --git a/kernel/exit.c b/kernel/exit.c
index cab535c427b8..b6c90b5ef509 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -48,6 +48,7 @@
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
+#include <linux/perf_counter.h>
#include <trace/events/sched.h>
#include <asm/uaccess.h>
@@ -154,6 +155,9 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
{
struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+#ifdef CONFIG_PERF_COUNTERS
+ WARN_ON_ONCE(tsk->perf_counter_ctxp);
+#endif
trace_sched_process_free(tsk);
put_task_struct(tsk);
}
@@ -170,6 +174,7 @@ repeat:
atomic_dec(&__task_cred(p)->user->processes);
proc_flush_task(p);
+
write_lock_irq(&tasklist_lock);
tracehook_finish_release_task(p);
__exit_signal(p);
@@ -971,16 +976,19 @@ NORET_TYPE void do_exit(long code)
module_put(tsk->binfmt->module);
proc_exit_connector(tsk);
+
+ /*
+ * Flush inherited counters to the parent - before the parent
+ * gets woken up by child-exit notifications.
+ */
+ perf_counter_exit_task(tsk);
+
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
mpol_put(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
- /*
- * This must happen late, after the PID is not
- * hashed anymore:
- */
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
if (unlikely(current->pi_state_cache))
@@ -1472,6 +1480,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
*/
if (*notask_error)
*notask_error = ret;
+ return 0;
}
if (likely(!ptrace) && unlikely(p->ptrace)) {
diff --git a/kernel/fork.c b/kernel/fork.c
index 711468f3db2a..be022c200da6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -62,6 +62,7 @@
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
+#include <linux/perf_counter.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -177,7 +178,7 @@ void __init fork_init(unsigned long mempages)
/* create a slab on which task_structs can be allocated */
task_struct_cachep =
kmem_cache_create("task_struct", sizeof(struct task_struct),
- ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
/* do the arch specific task caches init */
@@ -981,6 +982,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (!p)
goto fork_out;
+ ftrace_graph_init_task(p);
+
rt_mutex_init_task(p);
#ifdef CONFIG_PROVE_LOCKING
@@ -1094,6 +1097,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
+ retval = perf_counter_init_task(p);
+ if (retval)
+ goto bad_fork_cleanup_policy;
+
if ((retval = audit_alloc(p)))
goto bad_fork_cleanup_policy;
/* copy all the process information */
@@ -1130,8 +1137,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
}
}
- ftrace_graph_init_task(p);
-
p->pid = pid_nr(pid);
p->tgid = p->pid;
if (clone_flags & CLONE_THREAD)
@@ -1140,7 +1145,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (current->nsproxy != p->nsproxy) {
retval = ns_cgroup_clone(p, pid);
if (retval)
- goto bad_fork_free_graph;
+ goto bad_fork_free_pid;
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1232,7 +1237,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
- goto bad_fork_free_graph;
+ goto bad_fork_free_pid;
}
if (clone_flags & CLONE_THREAD) {
@@ -1267,8 +1272,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
cgroup_post_fork(p);
return p;
-bad_fork_free_graph:
- ftrace_graph_exit_task(p);
bad_fork_free_pid:
if (pid != &init_struct_pid)
free_pid(pid);
@@ -1292,6 +1295,7 @@ bad_fork_cleanup_semundo:
bad_fork_cleanup_audit:
audit_free(p);
bad_fork_cleanup_policy:
+ perf_counter_free_task(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
@@ -1405,10 +1409,16 @@ long do_fork(unsigned long clone_flags,
if (clone_flags & CLONE_VFORK) {
p->vfork_done = &vfork;
init_completion(&vfork);
+ } else if (!(clone_flags & CLONE_VM)) {
+ /*
+ * vfork will do an exec which will call
+ * set_task_comm()
+ */
+ perf_counter_fork(p);
}
audit_finish_fork(p);
- tracehook_report_clone(trace, regs, clone_flags, nr, p);
+ tracehook_report_clone(regs, clone_flags, nr, p);
/*
* We set PF_STARTING at creation in case tracing wants to
@@ -1460,20 +1470,20 @@ void __init proc_caches_init(void)
{
sighand_cachep = kmem_cache_create("sighand_cache",
sizeof(struct sighand_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
- sighand_ctor);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+ SLAB_NOTRACK, sighand_ctor);
signal_cachep = kmem_cache_create("signal_cache",
sizeof(struct signal_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
files_cachep = kmem_cache_create("files_cache",
sizeof(struct files_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
fs_cachep = kmem_cache_create("fs_cache",
sizeof(struct fs_struct), 0,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
mm_cachep = kmem_cache_create("mm_struct",
sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
mmap_init();
}
diff --git a/kernel/futex.c b/kernel/futex.c
index eef8cd26b5e5..80b5ce716596 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -19,6 +19,10 @@
* PRIVATE futexes by Eric Dumazet
* Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
*
+ * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
+ * Copyright (C) IBM Corporation, 2009
+ * Thanks to Thomas Gleixner for conceptual design and careful reviews.
+ *
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
@@ -96,8 +100,8 @@ struct futex_pi_state {
*/
struct futex_q {
struct plist_node list;
- /* There can only be a single waiter */
- wait_queue_head_t waiter;
+ /* Waiter reference */
+ struct task_struct *task;
/* Which hash list lock to use: */
spinlock_t *lock_ptr;
@@ -107,7 +111,9 @@ struct futex_q {
/* Optional priority inheritance state: */
struct futex_pi_state *pi_state;
- struct task_struct *task;
+
+ /* rt_waiter storage for requeue_pi: */
+ struct rt_mutex_waiter *rt_waiter;
/* Bitset for the optional bitmasked wakeup */
u32 bitset;
@@ -193,6 +199,7 @@ static void drop_futex_key_refs(union futex_key *key)
* @uaddr: virtual address of the futex
* @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
* @key: address where result is stored.
+ * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
*
* Returns a negative error code or 0
* The key words are stored in *key on success.
@@ -203,7 +210,8 @@ static void drop_futex_key_refs(union futex_key *key)
*
* lock_page() might sleep, the caller should not hold a spinlock.
*/
-static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
+static int
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
{
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
@@ -226,7 +234,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
+ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
return -EFAULT;
key->private.mm = mm;
key->private.address = address;
@@ -235,7 +243,7 @@ static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
}
again:
- err = get_user_pages_fast(address, 1, 0, &page);
+ err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
if (err < 0)
return err;
@@ -276,6 +284,25 @@ void put_futex_key(int fshared, union futex_key *key)
drop_futex_key_refs(key);
}
+/**
+ * futex_top_waiter() - Return the highest priority waiter on a futex
+ * @hb: the hash bucket the futex_q's reside in
+ * @key: the futex key (to distinguish it from other futex futex_q's)
+ *
+ * Must be called with the hb lock held.
+ */
+static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
+ union futex_key *key)
+{
+ struct futex_q *this;
+
+ plist_for_each_entry(this, &hb->chain, list) {
+ if (match_futex(&this->key, key))
+ return this;
+ }
+ return NULL;
+}
+
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
u32 curval;
@@ -537,28 +564,160 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
return 0;
}
+/**
+ * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
+ * @uaddr: the pi futex user address
+ * @hb: the pi futex hash bucket
+ * @key: the futex key associated with uaddr and hb
+ * @ps: the pi_state pointer where we store the result of the
+ * lookup
+ * @task: the task to perform the atomic lock work for. This will
+ * be "current" except in the case of requeue pi.
+ * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
+ *
+ * Returns:
+ * 0 - ready to wait
+ * 1 - acquired the lock
+ * <0 - error
+ *
+ * The hb->lock and futex_key refs shall be held by the caller.
+ */
+static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
+ union futex_key *key,
+ struct futex_pi_state **ps,
+ struct task_struct *task, int set_waiters)
+{
+ int lock_taken, ret, ownerdied = 0;
+ u32 uval, newval, curval;
+
+retry:
+ ret = lock_taken = 0;
+
+ /*
+ * To avoid races, we attempt to take the lock here again
+ * (by doing a 0 -> TID atomic cmpxchg), while holding all
+ * the locks. It will most likely not succeed.
+ */
+ newval = task_pid_vnr(task);
+ if (set_waiters)
+ newval |= FUTEX_WAITERS;
+
+ curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
+
+ if (unlikely(curval == -EFAULT))
+ return -EFAULT;
+
+ /*
+ * Detect deadlocks.
+ */
+ if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
+ return -EDEADLK;
+
+ /*
+ * Surprise - we got the lock. Just return to userspace:
+ */
+ if (unlikely(!curval))
+ return 1;
+
+ uval = curval;
+
+ /*
+ * Set the FUTEX_WAITERS flag, so the owner will know it has someone
+ * to wake at the next unlock.
+ */
+ newval = curval | FUTEX_WAITERS;
+
+ /*
+ * There are two cases, where a futex might have no owner (the
+ * owner TID is 0): OWNER_DIED. We take over the futex in this
+ * case. We also do an unconditional take over, when the owner
+ * of the futex died.
+ *
+ * This is safe as we are protected by the hash bucket lock !
+ */
+ if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
+ /* Keep the OWNER_DIED bit */
+ newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
+ ownerdied = 0;
+ lock_taken = 1;
+ }
+
+ curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
+
+ if (unlikely(curval == -EFAULT))
+ return -EFAULT;
+ if (unlikely(curval != uval))
+ goto retry;
+
+ /*
+ * We took the lock due to owner died take over.
+ */
+ if (unlikely(lock_taken))
+ return 1;
+
+ /*
+ * We dont have the lock. Look up the PI state (or create it if
+ * we are the first waiter):
+ */
+ ret = lookup_pi_state(uval, hb, key, ps);
+
+ if (unlikely(ret)) {
+ switch (ret) {
+ case -ESRCH:
+ /*
+ * No owner found for this futex. Check if the
+ * OWNER_DIED bit is set to figure out whether
+ * this is a robust futex or not.
+ */
+ if (get_futex_value_locked(&curval, uaddr))
+ return -EFAULT;
+
+ /*
+ * We simply start over in case of a robust
+ * futex. The code above will take the futex
+ * and return happy.
+ */
+ if (curval & FUTEX_OWNER_DIED) {
+ ownerdied = 1;
+ goto retry;
+ }
+ default:
+ break;
+ }
+ }
+
+ return ret;
+}
+
/*
* The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed.
*/
static void wake_futex(struct futex_q *q)
{
- plist_del(&q->list, &q->list.plist);
+ struct task_struct *p = q->task;
+
/*
- * The lock in wake_up_all() is a crucial memory barrier after the
- * plist_del() and also before assigning to q->lock_ptr.
+ * We set q->lock_ptr = NULL _before_ we wake up the task. If
+ * a non futex wake up happens on another CPU then the task
+ * might exit and p would dereference a non existing task
+ * struct. Prevent this by holding a reference on p across the
+ * wake up.
*/
- wake_up(&q->waiter);
+ get_task_struct(p);
+
+ plist_del(&q->list, &q->list.plist);
/*
- * The waiting task can free the futex_q as soon as this is written,
- * without taking any locks. This must come last.
- *
- * A memory barrier is required here to prevent the following store to
- * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
- * end of wake_up() does not prevent this store from moving.
+ * The waiting task can free the futex_q as soon as
+ * q->lock_ptr = NULL is written, without taking any locks. A
+ * memory barrier is required here to prevent the following
+ * store to lock_ptr from getting ahead of the plist_del.
*/
smp_wmb();
q->lock_ptr = NULL;
+
+ wake_up_state(p, TASK_NORMAL);
+ put_task_struct(p);
}
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
@@ -677,7 +836,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
if (!bitset)
return -EINVAL;
- ret = get_futex_key(uaddr, fshared, &key);
+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
@@ -687,7 +846,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key)) {
- if (this->pi_state) {
+ if (this->pi_state || this->rt_waiter) {
ret = -EINVAL;
break;
}
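
The reworked wake_futex() above is the kernel half of FUTEX_WAKE. For orientation, a minimal userspace sketch of the wait/wake pairing it serves (futex_wait_user() and futex_wake_user() are hypothetical wrappers around the raw syscall):

#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex_wait_user(uint32_t *uaddr, uint32_t expected)
{
	/* Sleeps only if *uaddr still equals 'expected' when the kernel checks it. */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, NULL, NULL, 0);
}

static long futex_wake_user(uint32_t *uaddr, int nr_wake)
{
	/* Wakes up to nr_wake tasks queued on uaddr (handled by futex_wake() above). */
	return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0);
}
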
@@ -723,10 +882,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
int ret, op_ret;
retry:
- ret = get_futex_key(uaddr1, fshared, &key1);
+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2);
+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -800,24 +959,185 @@ out:
return ret;
}
-/*
- * Requeue all waiters hashed on one physical page to another
- * physical page.
+/**
+ * requeue_futex() - Requeue a futex_q from one hb to another
+ * @q: the futex_q to requeue
+ * @hb1: the source hash_bucket
+ * @hb2: the target hash_bucket
+ * @key2: the new key for the requeued futex_q
+ */
+static inline
+void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
+ struct futex_hash_bucket *hb2, union futex_key *key2)
+{
+
+ /*
+ * If key1 and key2 hash to the same bucket, no need to
+ * requeue.
+ */
+ if (likely(&hb1->chain != &hb2->chain)) {
+ plist_del(&q->list, &hb1->chain);
+ plist_add(&q->list, &hb2->chain);
+ q->lock_ptr = &hb2->lock;
+#ifdef CONFIG_DEBUG_PI_LIST
+ q->list.plist.lock = &hb2->lock;
+#endif
+ }
+ get_futex_key_refs(key2);
+ q->key = *key2;
+}
+
+/**
+ * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
+ * q: the futex_q
+ * key: the key of the requeue target futex
+ *
+ * During futex_requeue, with requeue_pi=1, it is possible to acquire the
+ * target futex if it is uncontended or via a lock steal. Set the futex_q key
+ * to the requeue target futex so the waiter can detect the wakeup on the right
+ * futex, but remove it from the hb and NULL the rt_waiter so it can detect
+ * atomic lock acquisition. Must be called with the q->lock_ptr held.
+ */
+static inline
+void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
+{
+ drop_futex_key_refs(&q->key);
+ get_futex_key_refs(key);
+ q->key = *key;
+
+ WARN_ON(plist_node_empty(&q->list));
+ plist_del(&q->list, &q->list.plist);
+
+ WARN_ON(!q->rt_waiter);
+ q->rt_waiter = NULL;
+
+ wake_up_state(q->task, TASK_NORMAL);
+}
+
+/**
+ * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
+ * @pifutex: the user address of the to futex
+ * @hb1: the from futex hash bucket, must be locked by the caller
+ * @hb2: the to futex hash bucket, must be locked by the caller
+ * @key1: the from futex key
+ * @key2: the to futex key
+ * @ps: address to store the pi_state pointer
+ * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
+ *
+ * Try and get the lock on behalf of the top waiter if we can do it atomically.
+ * Wake the top waiter if we succeed. If the caller specified set_waiters,
+ * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
+ * hb1 and hb2 must be held by the caller.
+ *
+ * Returns:
+ * 0 - failed to acquire the lock atomically
+ * 1 - acquired the lock
+ * <0 - error
+ */
+static int futex_proxy_trylock_atomic(u32 __user *pifutex,
+ struct futex_hash_bucket *hb1,
+ struct futex_hash_bucket *hb2,
+ union futex_key *key1, union futex_key *key2,
+ struct futex_pi_state **ps, int set_waiters)
+{
+ struct futex_q *top_waiter = NULL;
+ u32 curval;
+ int ret;
+
+ if (get_futex_value_locked(&curval, pifutex))
+ return -EFAULT;
+
+ /*
+ * Find the top_waiter and determine if there are additional waiters.
+ * If the caller intends to requeue more than 1 waiter to pifutex,
+ * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
+ * as we have means to handle the possible fault. If not, don't set
+ * the bit unnecessarily as it will force the subsequent unlock to enter
+ * the kernel.
+ */
+ top_waiter = futex_top_waiter(hb1, key1);
+
+ /* There are no waiters, nothing for us to do. */
+ if (!top_waiter)
+ return 0;
+
+ /*
+ * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
+ * the contended case or if set_waiters is 1. The pi_state is returned
+ * in ps in contended cases.
+ */
+ ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
+ set_waiters);
+ if (ret == 1)
+ requeue_pi_wake_futex(top_waiter, key2);
+
+ return ret;
+}
+
+/**
+ * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
+ * uaddr1: source futex user address
+ * uaddr2: target futex user address
+ * nr_wake: number of waiters to wake (must be 1 for requeue_pi)
+ * nr_requeue: number of waiters to requeue (0-INT_MAX)
+ * requeue_pi: if we are attempting to requeue from a non-pi futex to a
+ * pi futex (pi to pi requeue is not supported)
+ *
+ * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
+ * uaddr2 atomically on behalf of the top waiter.
+ *
+ * Returns:
+ * >=0 - on success, the number of tasks requeued or woken
+ * <0 - on error
*/
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
- int nr_wake, int nr_requeue, u32 *cmpval)
+ int nr_wake, int nr_requeue, u32 *cmpval,
+ int requeue_pi)
{
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
+ int drop_count = 0, task_count = 0, ret;
+ struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct plist_head *head1;
struct futex_q *this, *next;
- int ret, drop_count = 0;
+ u32 curval2;
+
+ if (requeue_pi) {
+ /*
+ * requeue_pi requires a pi_state, try to allocate it now
+ * without any locks in case it fails.
+ */
+ if (refill_pi_state_cache())
+ return -ENOMEM;
+ /*
+ * requeue_pi must wake as many tasks as it can, up to nr_wake
+ * + nr_requeue, since it acquires the rt_mutex prior to
+ * returning to userspace, so as to not leave the rt_mutex with
+ * waiters and no owner. However, second and third wake-ups
+ * cannot be predicted as they involve race conditions with the
+ * first wake and a fault while looking up the pi_state. Both
+ * pthread_cond_signal() and pthread_cond_broadcast() should
+ * use nr_wake=1.
+ */
+ if (nr_wake != 1)
+ return -EINVAL;
+ }
retry:
- ret = get_futex_key(uaddr1, fshared, &key1);
+ if (pi_state != NULL) {
+ /*
+ * We will have to lookup the pi_state again, so free this one
+ * to keep the accounting correct.
+ */
+ free_pi_state(pi_state);
+ pi_state = NULL;
+ }
+
+ ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
if (unlikely(ret != 0))
goto out;
- ret = get_futex_key(uaddr2, fshared, &key2);
+ ret = get_futex_key(uaddr2, fshared, &key2,
+ requeue_pi ? VERIFY_WRITE : VERIFY_READ);
if (unlikely(ret != 0))
goto out_put_key1;
@@ -852,32 +1172,99 @@ retry_private:
}
}
+ if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
+ /*
+ * Attempt to acquire uaddr2 and wake the top waiter. If we
+ * intend to requeue waiters, force setting the FUTEX_WAITERS
+ * bit. We force this here where we are able to easily handle
+ * faults rather in the requeue loop below.
+ */
+ ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
+ &key2, &pi_state, nr_requeue);
+
+ /*
+ * At this point the top_waiter has either taken uaddr2 or is
+ * waiting on it. If the former, then the pi_state will not
+ * exist yet, look it up one more time to ensure we have a
+ * reference to it.
+ */
+ if (ret == 1) {
+ WARN_ON(pi_state);
+ task_count++;
+ ret = get_futex_value_locked(&curval2, uaddr2);
+ if (!ret)
+ ret = lookup_pi_state(curval2, hb2, &key2,
+ &pi_state);
+ }
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EFAULT:
+ double_unlock_hb(hb1, hb2);
+ put_futex_key(fshared, &key2);
+ put_futex_key(fshared, &key1);
+ ret = get_user(curval2, uaddr2);
+ if (!ret)
+ goto retry;
+ goto out;
+ case -EAGAIN:
+ /* The owner was exiting, try again. */
+ double_unlock_hb(hb1, hb2);
+ put_futex_key(fshared, &key2);
+ put_futex_key(fshared, &key1);
+ cond_resched();
+ goto retry;
+ default:
+ goto out_unlock;
+ }
+ }
+
head1 = &hb1->chain;
plist_for_each_entry_safe(this, next, head1, list) {
- if (!match_futex (&this->key, &key1))
+ if (task_count - nr_wake >= nr_requeue)
+ break;
+
+ if (!match_futex(&this->key, &key1))
continue;
- if (++ret <= nr_wake) {
+
+ WARN_ON(!requeue_pi && this->rt_waiter);
+ WARN_ON(requeue_pi && !this->rt_waiter);
+
+ /*
+ * Wake nr_wake waiters. For requeue_pi, if we acquired the
+ * lock, we already woke the top_waiter. If not, it will be
+ * woken by futex_unlock_pi().
+ */
+ if (++task_count <= nr_wake && !requeue_pi) {
wake_futex(this);
- } else {
- /*
- * If key1 and key2 hash to the same bucket, no need to
- * requeue.
- */
- if (likely(head1 != &hb2->chain)) {
- plist_del(&this->list, &hb1->chain);
- plist_add(&this->list, &hb2->chain);
- this->lock_ptr = &hb2->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
- this->list.plist.lock = &hb2->lock;
-#endif
- }
- this->key = key2;
- get_futex_key_refs(&key2);
- drop_count++;
+ continue;
+ }
- if (ret - nr_wake >= nr_requeue)
- break;
+ /*
+ * Requeue nr_requeue waiters and possibly one more in the case
+ * of requeue_pi if we couldn't acquire the lock atomically.
+ */
+ if (requeue_pi) {
+ /* Prepare the waiter to take the rt_mutex. */
+ atomic_inc(&pi_state->refcount);
+ this->pi_state = pi_state;
+ ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
+ this->rt_waiter,
+ this->task, 1);
+ if (ret == 1) {
+ /* We got the lock. */
+ requeue_pi_wake_futex(this, &key2);
+ continue;
+ } else if (ret) {
+ /* -EDEADLK */
+ this->pi_state = NULL;
+ free_pi_state(pi_state);
+ goto out_unlock;
+ }
}
+ requeue_futex(this, hb1, hb2, &key2);
+ drop_count++;
}
out_unlock:
@@ -897,7 +1284,9 @@ out_put_keys:
out_put_key1:
put_futex_key(fshared, &key1);
out:
- return ret;
+ if (pi_state != NULL)
+ free_pi_state(pi_state);
+ return ret ? ret : task_count;
}
/* The key must be already stored in q->key. */
@@ -905,8 +1294,6 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
struct futex_hash_bucket *hb;
- init_waitqueue_head(&q->waiter);
-
get_futex_key_refs(&q->key);
hb = hash_futex(&q->key);
q->lock_ptr = &hb->lock;
@@ -1117,35 +1504,149 @@ handle_fault:
*/
#define FLAGS_SHARED 0x01
#define FLAGS_CLOCKRT 0x02
+#define FLAGS_HAS_TIMEOUT 0x04
static long futex_wait_restart(struct restart_block *restart);
-static int futex_wait(u32 __user *uaddr, int fshared,
- u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
+/**
+ * fixup_owner() - Post lock pi_state and corner case management
+ * @uaddr: user address of the futex
+ * @fshared: whether the futex is shared (1) or not (0)
+ * @q: futex_q (contains pi_state and access to the rt_mutex)
+ * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
+ *
+ * After attempting to lock an rt_mutex, this function is called to cleanup
+ * the pi_state owner as well as handle race conditions that may allow us to
+ * acquire the lock. Must be called with the hb lock held.
+ *
+ * Returns:
+ * 1 - success, lock taken
+ * 0 - success, lock not taken
+ * <0 - on error (-EFAULT)
+ */
+static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
+ int locked)
{
- struct task_struct *curr = current;
- struct restart_block *restart;
- DECLARE_WAITQUEUE(wait, curr);
- struct futex_hash_bucket *hb;
- struct futex_q q;
- u32 uval;
- int ret;
- struct hrtimer_sleeper t;
- int rem = 0;
+ struct task_struct *owner;
+ int ret = 0;
- if (!bitset)
- return -EINVAL;
+ if (locked) {
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+ * did a lock-steal - fix up the PI-state in that case:
+ */
+ if (q->pi_state->owner != current)
+ ret = fixup_pi_state_owner(uaddr, q, current, fshared);
+ goto out;
+ }
- q.pi_state = NULL;
- q.bitset = bitset;
-retry:
- q.key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q.key);
- if (unlikely(ret != 0))
+ /*
+ * Catch the rare case, where the lock was released when we were on the
+ * way back before we locked the hash bucket.
+ */
+ if (q->pi_state->owner == current) {
+ /*
+ * Try to get the rt_mutex now. This might fail as some other
+ * task acquired the rt_mutex after we removed ourself from the
+ * rt_mutex waiters list.
+ */
+ if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
+ locked = 1;
+ goto out;
+ }
+
+ /*
+ * pi_state is incorrect, some other task did a lock steal and
+ * we returned due to timeout or signal without taking the
+ * rt_mutex. Too late. We can access the rt_mutex_owner without
+ * locking, as the other task is now blocked on the hash bucket
+ * lock. Fix the state up.
+ */
+ owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+ ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
goto out;
+ }
-retry_private:
- hb = queue_lock(&q);
+ /*
+ * Paranoia check. If we did not take the lock, then we should not be
+ * the owner, nor the pending owner, of the rt_mutex.
+ */
+ if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
+ printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
+ "pi-state %p\n", ret,
+ q->pi_state->pi_mutex.owner,
+ q->pi_state->owner);
+
+out:
+ return ret ? ret : locked;
+}
+
+/**
+ * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
+ * @hb: the futex hash bucket, must be locked by the caller
+ * @q: the futex_q to queue up on
+ * @timeout: the prepared hrtimer_sleeper, or null for no timeout
+ */
+static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
+ struct hrtimer_sleeper *timeout)
+{
+ queue_me(q, hb);
+
+ /*
+ * There might have been scheduling since the queue_me(), as we
+ * cannot hold a spinlock across the get_user() in case it
+ * faults, and we cannot just set TASK_INTERRUPTIBLE state when
+ * queueing ourselves into the futex hash. This code thus has to
+ * rely on the futex_wake() code removing us from hash when it
+ * wakes us up.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /* Arm the timer */
+ if (timeout) {
+ hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+ if (!hrtimer_active(&timeout->timer))
+ timeout->task = NULL;
+ }
+
+ /*
+ * !plist_node_empty() is safe here without any lock.
+ * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
+ */
+ if (likely(!plist_node_empty(&q->list))) {
+ /*
+ * If the timer has already expired, current will already be
+ * flagged for rescheduling. Only call schedule if there
+ * is no timeout, or if it has yet to expire.
+ */
+ if (!timeout || timeout->task)
+ schedule();
+ }
+ __set_current_state(TASK_RUNNING);
+}
+
+/**
+ * futex_wait_setup() - Prepare to wait on a futex
+ * @uaddr: the futex userspace address
+ * @val: the expected value
+ * @fshared: whether the futex is shared (1) or not (0)
+ * @q: the associated futex_q
+ * @hb: storage for hash_bucket pointer to be returned to caller
+ *
+ * Setup the futex_q and locate the hash_bucket. Get the futex value and
+ * compare it with the expected value. Handle atomic faults internally.
+ * Return with the hb lock held and a q.key reference on success, and unlocked
+ * with no q.key reference on failure.
+ *
+ * Returns:
+ * 0 - uaddr contains val and hb has been locked
+ * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
+ */
+static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
+ struct futex_q *q, struct futex_hash_bucket **hb)
+{
+ u32 uval;
+ int ret;
/*
* Access the page AFTER the hash-bucket is locked.
@@ -1163,95 +1664,83 @@ retry_private:
* A consequence is that futex_wait() can return zero and absorb
* a wakeup when *uaddr != val on entry to the syscall. This is
* rare, but normal.
- *
- * For shared futexes, we hold the mmap semaphore, so the mapping
- * cannot have changed since we looked it up in get_futex_key.
*/
+retry:
+ q->key = FUTEX_KEY_INIT;
+ ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
+ if (unlikely(ret != 0))
+ return ret;
+
+retry_private:
+ *hb = queue_lock(q);
+
ret = get_futex_value_locked(&uval, uaddr);
- if (unlikely(ret)) {
- queue_unlock(&q, hb);
+ if (ret) {
+ queue_unlock(q, *hb);
ret = get_user(uval, uaddr);
if (ret)
- goto out_put_key;
+ goto out;
if (!fshared)
goto retry_private;
- put_futex_key(fshared, &q.key);
+ put_futex_key(fshared, &q->key);
goto retry;
}
- ret = -EWOULDBLOCK;
- if (unlikely(uval != val)) {
- queue_unlock(&q, hb);
- goto out_put_key;
- }
- /* Only actually queue if *uaddr contained val. */
- queue_me(&q, hb);
+ if (uval != val) {
+ queue_unlock(q, *hb);
+ ret = -EWOULDBLOCK;
+ }
- /*
- * There might have been scheduling since the queue_me(), as we
- * cannot hold a spinlock across the get_user() in case it
- * faults, and we cannot just set TASK_INTERRUPTIBLE state when
- * queueing ourselves into the futex hash. This code thus has to
- * rely on the futex_wake() code removing us from hash when it
- * wakes us up.
- */
+out:
+ if (ret)
+ put_futex_key(fshared, &q->key);
+ return ret;
+}
- /* add_wait_queue is the barrier after __set_current_state. */
- __set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&q.waiter, &wait);
- /*
- * !plist_node_empty() is safe here without any lock.
- * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
- */
- if (likely(!plist_node_empty(&q.list))) {
- if (!abs_time)
- schedule();
- else {
- hrtimer_init_on_stack(&t.timer,
- clockrt ? CLOCK_REALTIME :
- CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- hrtimer_init_sleeper(&t, current);
- hrtimer_set_expires_range_ns(&t.timer, *abs_time,
- current->timer_slack_ns);
-
- hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
- if (!hrtimer_active(&t.timer))
- t.task = NULL;
+static int futex_wait(u32 __user *uaddr, int fshared,
+ u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
+{
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct restart_block *restart;
+ struct futex_hash_bucket *hb;
+ struct futex_q q;
+ int ret;
- /*
- * the timer could have already expired, in which
- * case current would be flagged for rescheduling.
- * Don't bother calling schedule.
- */
- if (likely(t.task))
- schedule();
+ if (!bitset)
+ return -EINVAL;
- hrtimer_cancel(&t.timer);
+ q.pi_state = NULL;
+ q.bitset = bitset;
+ q.rt_waiter = NULL;
- /* Flag if a timeout occured */
- rem = (t.task == NULL);
+ if (abs_time) {
+ to = &timeout;
- destroy_hrtimer_on_stack(&t.timer);
- }
+ hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init_sleeper(to, current);
+ hrtimer_set_expires_range_ns(&to->timer, *abs_time,
+ current->timer_slack_ns);
}
- __set_current_state(TASK_RUNNING);
- /*
- * NOTE: we don't remove ourselves from the waitqueue because
- * we are the only user of it.
- */
+ /* Prepare to wait on uaddr. */
+ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
+ if (ret)
+ goto out;
+
+ /* queue_me and wait for wakeup, timeout, or a signal. */
+ futex_wait_queue_me(hb, &q, to);
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
if (!unqueue_me(&q))
goto out_put_key;
ret = -ETIMEDOUT;
- if (rem)
+ if (to && !to->task)
goto out_put_key;
/*
@@ -1268,7 +1757,7 @@ retry_private:
restart->futex.val = val;
restart->futex.time = abs_time->tv64;
restart->futex.bitset = bitset;
- restart->futex.flags = 0;
+ restart->futex.flags = FLAGS_HAS_TIMEOUT;
if (fshared)
restart->futex.flags |= FLAGS_SHARED;
@@ -1280,6 +1769,10 @@ retry_private:
out_put_key:
put_futex_key(fshared, &q.key);
out:
+ if (to) {
+ hrtimer_cancel(&to->timer);
+ destroy_hrtimer_on_stack(&to->timer);
+ }
return ret;
}
@@ -1288,13 +1781,16 @@ static long futex_wait_restart(struct restart_block *restart)
{
u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
int fshared = 0;
- ktime_t t;
+ ktime_t t, *tp = NULL;
- t.tv64 = restart->futex.time;
+ if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
+ t.tv64 = restart->futex.time;
+ tp = &t;
+ }
restart->fn = do_no_restart_syscall;
if (restart->futex.flags & FLAGS_SHARED)
fshared = 1;
- return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
+ return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
restart->futex.bitset,
restart->futex.flags & FLAGS_CLOCKRT);
}
@@ -1310,11 +1806,10 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
int detect, ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
- struct task_struct *curr = current;
struct futex_hash_bucket *hb;
- u32 uval, newval, curval;
+ u32 uval;
struct futex_q q;
- int ret, lock_taken, ownerdied = 0;
+ int res, ret;
if (refill_pi_state_cache())
return -ENOMEM;
@@ -1328,90 +1823,25 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
}
q.pi_state = NULL;
+ q.rt_waiter = NULL;
retry:
q.key = FUTEX_KEY_INIT;
- ret = get_futex_key(uaddr, fshared, &q.key);
+ ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
retry_private:
hb = queue_lock(&q);
-retry_locked:
- ret = lock_taken = 0;
-
- /*
- * To avoid races, we attempt to take the lock here again
- * (by doing a 0 -> TID atomic cmpxchg), while holding all
- * the locks. It will most likely not succeed.
- */
- newval = task_pid_vnr(current);
-
- curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
-
- if (unlikely(curval == -EFAULT))
- goto uaddr_faulted;
-
- /*
- * Detect deadlocks. In case of REQUEUE_PI this is a valid
- * situation and we return success to user space.
- */
- if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) {
- ret = -EDEADLK;
- goto out_unlock_put_key;
- }
-
- /*
- * Surprise - we got the lock. Just return to userspace:
- */
- if (unlikely(!curval))
- goto out_unlock_put_key;
-
- uval = curval;
-
- /*
- * Set the WAITERS flag, so the owner will know it has someone
- * to wake at next unlock
- */
- newval = curval | FUTEX_WAITERS;
-
- /*
- * There are two cases, where a futex might have no owner (the
- * owner TID is 0): OWNER_DIED. We take over the futex in this
- * case. We also do an unconditional take over, when the owner
- * of the futex died.
- *
- * This is safe as we are protected by the hash bucket lock !
- */
- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
- /* Keep the OWNER_DIED bit */
- newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current);
- ownerdied = 0;
- lock_taken = 1;
- }
-
- curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
- if (unlikely(curval == -EFAULT))
- goto uaddr_faulted;
- if (unlikely(curval != uval))
- goto retry_locked;
-
- /*
- * We took the lock due to owner died take over.
- */
- if (unlikely(lock_taken))
- goto out_unlock_put_key;
-
- /*
- * We dont have the lock. Look up the PI state (or create it if
- * we are the first waiter):
- */
- ret = lookup_pi_state(uval, hb, &q.key, &q.pi_state);
-
+ ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
if (unlikely(ret)) {
switch (ret) {
-
+ case 1:
+ /* We got the lock. */
+ ret = 0;
+ goto out_unlock_put_key;
+ case -EFAULT:
+ goto uaddr_faulted;
case -EAGAIN:
/*
* Task is exiting and we just wait for the
@@ -1421,25 +1851,6 @@ retry_locked:
put_futex_key(fshared, &q.key);
cond_resched();
goto retry;
-
- case -ESRCH:
- /*
- * No owner found for this futex. Check if the
- * OWNER_DIED bit is set to figure out whether
- * this is a robust futex or not.
- */
- if (get_futex_value_locked(&curval, uaddr))
- goto uaddr_faulted;
-
- /*
- * We simply start over in case of a robust
- * futex. The code above will take the futex
- * and return happy.
- */
- if (curval & FUTEX_OWNER_DIED) {
- ownerdied = 1;
- goto retry_locked;
- }
default:
goto out_unlock_put_key;
}
@@ -1463,71 +1874,21 @@ retry_locked:
}
spin_lock(q.lock_ptr);
-
- if (!ret) {
- /*
- * Got the lock. We might not be the anticipated owner
- * if we did a lock-steal - fix up the PI-state in
- * that case:
- */
- if (q.pi_state->owner != curr)
- ret = fixup_pi_state_owner(uaddr, &q, curr, fshared);
- } else {
- /*
- * Catch the rare case, where the lock was released
- * when we were on the way back before we locked the
- * hash bucket.
- */
- if (q.pi_state->owner == curr) {
- /*
- * Try to get the rt_mutex now. This might
- * fail as some other task acquired the
- * rt_mutex after we removed ourself from the
- * rt_mutex waiters list.
- */
- if (rt_mutex_trylock(&q.pi_state->pi_mutex))
- ret = 0;
- else {
- /*
- * pi_state is incorrect, some other
- * task did a lock steal and we
- * returned due to timeout or signal
- * without taking the rt_mutex. Too
- * late. We can access the
- * rt_mutex_owner without locking, as
- * the other task is now blocked on
- * the hash bucket lock. Fix the state
- * up.
- */
- struct task_struct *owner;
- int res;
-
- owner = rt_mutex_owner(&q.pi_state->pi_mutex);
- res = fixup_pi_state_owner(uaddr, &q, owner,
- fshared);
-
- /* propagate -EFAULT, if the fixup failed */
- if (res)
- ret = res;
- }
- } else {
- /*
- * Paranoia check. If we did not take the lock
- * in the trylock above, then we should not be
- * the owner of the rtmutex, neither the real
- * nor the pending one:
- */
- if (rt_mutex_owner(&q.pi_state->pi_mutex) == curr)
- printk(KERN_ERR "futex_lock_pi: ret = %d "
- "pi-mutex: %p pi-state %p\n", ret,
- q.pi_state->pi_mutex.owner,
- q.pi_state->owner);
- }
- }
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+ * haven't already.
+ */
+ res = fixup_owner(uaddr, fshared, &q, !ret);
+ /*
+	 * If fixup_owner() returned an error, propagate that. If it acquired
+ * the lock, clear our -ETIMEDOUT or -EINTR.
+ */
+ if (res)
+ ret = (res < 0) ? res : 0;
/*
- * If fixup_pi_state_owner() faulted and was unable to handle the
- * fault, unlock it and return the fault to userspace.
+ * If fixup_owner() faulted and was unable to handle the fault, unlock
+ * it and return the fault to userspace.
*/
if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
rt_mutex_unlock(&q.pi_state->pi_mutex);
@@ -1535,9 +1896,7 @@ retry_locked:
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
- if (to)
- destroy_hrtimer_on_stack(&to->timer);
- return ret != -EINTR ? ret : -ERESTARTNOINTR;
+ goto out;
out_unlock_put_key:
queue_unlock(&q, hb);
@@ -1547,7 +1906,7 @@ out_put_key:
out:
if (to)
destroy_hrtimer_on_stack(&to->timer);
- return ret;
+ return ret != -EINTR ? ret : -ERESTARTNOINTR;
uaddr_faulted:
/*
@@ -1570,7 +1929,6 @@ uaddr_faulted:
goto retry;
}
-
/*
* Userspace attempted a TID -> 0 atomic transition, and failed.
* This is the in-kernel slowpath: we look up the PI state (if any),
@@ -1594,7 +1952,7 @@ retry:
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
return -EPERM;
- ret = get_futex_key(uaddr, fshared, &key);
+ ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
if (unlikely(ret != 0))
goto out;
@@ -1672,6 +2030,229 @@ pi_faulted:
return ret;
}
+/**
+ * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
+ * @hb:	the hash_bucket futex_q was originally enqueued on
+ * @q: the futex_q woken while waiting to be requeued
+ * @key2: the futex_key of the requeue target futex
+ * @timeout: the timeout associated with the wait (NULL if none)
+ *
+ * Detect if the task was woken on the initial futex as opposed to the requeue
+ * target futex. If so, determine if it was a timeout or a signal that caused
+ * the wakeup and return the appropriate error code to the caller. Must be
+ * called with the hb lock held.
+ *
+ * Returns
+ * 0 - no early wakeup detected
+ * <0 - -ETIMEDOUT or -ERESTARTNOINTR
+ */
+static inline
+int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
+ struct futex_q *q, union futex_key *key2,
+ struct hrtimer_sleeper *timeout)
+{
+ int ret = 0;
+
+ /*
+ * With the hb lock held, we avoid races while we process the wakeup.
+ * We only need to hold hb (and not hb2) to ensure atomicity as the
+ * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
+ * It can't be requeued from uaddr2 to something else since we don't
+ * support a PI aware source futex for requeue.
+ */
+ if (!match_futex(&q->key, key2)) {
+ WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
+ /*
+ * We were woken prior to requeue by a timeout or a signal.
+ * Unqueue the futex_q and determine which it was.
+ */
+ plist_del(&q->list, &q->list.plist);
+ drop_futex_key_refs(&q->key);
+
+ if (timeout && !timeout->task)
+ ret = -ETIMEDOUT;
+ else
+ ret = -ERESTARTNOINTR;
+ }
+ return ret;
+}
+
+/**
+ * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
+ * @uaddr:	the futex we initially wait on (non-pi)
+ * @fshared: whether the futexes are shared (1) or not (0). They must be
+ * the same type, no requeueing from private to shared, etc.
+ * @val: the expected value of uaddr
+ * @abs_time: absolute timeout
+ * @bitset: 32 bit wakeup bitset set by userspace, defaults to all.
+ * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
+ * @uaddr2: the pi futex we will take prior to returning to user-space
+ *
+ * The caller will wait on uaddr and will be requeued by futex_requeue() to
+ * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
+ * complete the acquisition of the rt_mutex prior to returning to userspace.
+ * This ensures the rt_mutex maintains an owner when it has waiters; without
+ * one, the pi logic wouldn't know which task to boost/deboost, if there was a
+ * need to.
+ *
+ * We call schedule in futex_wait_queue_me() when we enqueue and return there
+ * via the following:
+ * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
+ * 2) wakeup on uaddr2 after a requeue and subsequent unlock
+ * 3) signal (before or after requeue)
+ * 4) timeout (before or after requeue)
+ *
+ * If 3, we setup a restart_block with futex_wait_requeue_pi() as the function.
+ *
+ * If 2, we may then block on trying to take the rt_mutex and return via:
+ * 5) successful lock
+ * 6) signal
+ * 7) timeout
+ * 8) other lock acquisition failure
+ *
+ * If 6, we setup a restart_block with futex_lock_pi() as the function.
+ *
+ * If 4 or 7, we cleanup and return with -ETIMEDOUT.
+ *
+ * Returns:
+ * 0 - On success
+ * <0 - On error
+ */
+static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
+ u32 val, ktime_t *abs_time, u32 bitset,
+ int clockrt, u32 __user *uaddr2)
+{
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct rt_mutex *pi_mutex = NULL;
+ struct futex_hash_bucket *hb;
+ union futex_key key2;
+ struct futex_q q;
+ int res, ret;
+
+ if (!bitset)
+ return -EINVAL;
+
+ if (abs_time) {
+ to = &timeout;
+ hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init_sleeper(to, current);
+ hrtimer_set_expires_range_ns(&to->timer, *abs_time,
+ current->timer_slack_ns);
+ }
+
+ /*
+ * The waiter is allocated on our stack, manipulated by the requeue
+ * code while we sleep on uaddr.
+ */
+ debug_rt_mutex_init_waiter(&rt_waiter);
+ rt_waiter.task = NULL;
+
+ q.pi_state = NULL;
+ q.bitset = bitset;
+ q.rt_waiter = &rt_waiter;
+
+ key2 = FUTEX_KEY_INIT;
+ ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+ if (unlikely(ret != 0))
+ goto out;
+
+ /* Prepare to wait on uaddr. */
+ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
+ if (ret)
+ goto out_key2;
+
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+ spin_lock(&hb->lock);
+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+ spin_unlock(&hb->lock);
+ if (ret)
+ goto out_put_keys;
+
+ /*
+ * In order for us to be here, we know our q.key == key2, and since
+ * we took the hb->lock above, we also know that futex_requeue() has
+ * completed and we no longer have to concern ourselves with a wakeup
+	 * race with the atomic proxy lock acquisition by the requeue code.
+ */
+
+ /* Check if the requeue code acquired the second futex for us. */
+ if (!q.rt_waiter) {
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+ spin_lock(q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current,
+ fshared);
+ spin_unlock(q.lock_ptr);
+ }
+ } else {
+ /*
+ * We have been woken up by futex_unlock_pi(), a timeout, or a
+ * signal. futex_unlock_pi() will not destroy the lock_ptr nor
+ * the pi_state.
+ */
+ WARN_ON(!&q.pi_state);
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+
+ spin_lock(q.lock_ptr);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+ * haven't already.
+ */
+ res = fixup_owner(uaddr2, fshared, &q, !ret);
+ /*
+		 * If fixup_owner() returned an error, propagate that. If it
+ * acquired the lock, clear our -ETIMEDOUT or -EINTR.
+ */
+ if (res)
+ ret = (res < 0) ? res : 0;
+
+ /* Unqueue and drop the lock. */
+ unqueue_me_pi(&q);
+ }
+
+ /*
+ * If fixup_pi_state_owner() faulted and was unable to handle the
+ * fault, unlock the rt_mutex and return the fault to userspace.
+ */
+ if (ret == -EFAULT) {
+ if (rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+ } else if (ret == -EINTR) {
+ /*
+ * We've already been requeued, but we have no way to
+ * restart by calling futex_lock_pi() directly. We
+ * could restart the syscall, but that will look at
+ * the user space value and return right away. So we
+ * drop back with EWOULDBLOCK to tell user space that
+		 * "val" has been changed. That's the same as what the
+ * restart of the syscall would do in
+ * futex_wait_setup().
+ */
+ ret = -EWOULDBLOCK;
+ }
+
+out_put_keys:
+ put_futex_key(fshared, &q.key);
+out_key2:
+ put_futex_key(fshared, &key2);
+
+out:
+ if (to) {
+ hrtimer_cancel(&to->timer);
+ destroy_hrtimer_on_stack(&to->timer);
+ }
+ return ret;
+}
+
/*
* Support for robust futexes: the kernel cleans up held futexes at
* thread exit time.
@@ -1894,7 +2475,7 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
fshared = 1;
clockrt = op & FUTEX_CLOCK_REALTIME;
- if (clockrt && cmd != FUTEX_WAIT_BITSET)
+ if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
return -ENOSYS;
switch (cmd) {
@@ -1909,10 +2490,11 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
ret = futex_wake(uaddr, fshared, val, val3);
break;
case FUTEX_REQUEUE:
- ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
+ ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
break;
case FUTEX_CMP_REQUEUE:
- ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
+ ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
+ 0);
break;
case FUTEX_WAKE_OP:
ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
@@ -1929,6 +2511,15 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
if (futex_cmpxchg_enabled)
ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
break;
+ case FUTEX_WAIT_REQUEUE_PI:
+ val3 = FUTEX_BITSET_MATCH_ANY;
+ ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
+ clockrt, uaddr2);
+ break;
+ case FUTEX_CMP_REQUEUE_PI:
+ ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
+ 1);
+ break;
default:
ret = -ENOSYS;
}
@@ -1946,7 +2537,8 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
int cmd = op & FUTEX_CMD_MASK;
if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
- cmd == FUTEX_WAIT_BITSET)) {
+ cmd == FUTEX_WAIT_BITSET ||
+ cmd == FUTEX_WAIT_REQUEUE_PI)) {
if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
return -EFAULT;
if (!timespec_valid(&ts))
@@ -1958,11 +2550,11 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
tp = &t;
}
/*
- * requeue parameter in 'utime' if cmd == FUTEX_REQUEUE.
+ * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
* number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
*/
if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
- cmd == FUTEX_WAKE_OP)
+ cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
val2 = (u32) (unsigned long) utime;
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
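
The two new ops are designed to be used as a pair: a waiter blocks on a non-PI futex with FUTEX_WAIT_REQUEUE_PI, naming up front the PI futex it expects to be requeued to, and the waker wakes one task and moves the rest with FUTEX_CMP_REQUEUE_PI. A rough userspace sketch of that pairing (not part of the patch; the helper names are invented and it assumes a <linux/futex.h> that already defines the new op codes):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

/* Waiter side: block on 'cond' (non-PI) and ask to be requeued to 'lock'
 * (PI). Per do_futex() above, the bitset is forced to
 * FUTEX_BITSET_MATCH_ANY for this op, so the last argument is unused. */
static long wait_requeue_pi(uint32_t *cond, uint32_t expected, uint32_t *lock)
{
	return syscall(SYS_futex, cond, FUTEX_WAIT_REQUEUE_PI, expected,
		       NULL /* no timeout */, lock, 0);
}

/* Waker side: wake one waiter and requeue up to nr_requeue others from
 * 'cond' onto the PI futex 'lock', provided *cond still equals 'expected'.
 * nr_requeue travels in the timeout slot, matching the val2 handling in
 * the futex syscall entry above. */
static long cmp_requeue_pi(uint32_t *cond, uint32_t *lock,
			   unsigned long nr_requeue, uint32_t expected)
{
	return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI, 1,
		       (void *)nr_requeue, lock, expected);
}
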
diff --git a/kernel/groups.c b/kernel/groups.c
new file mode 100644
index 000000000000..2b45b2ee3964
--- /dev/null
+++ b/kernel/groups.c
@@ -0,0 +1,288 @@
+/*
+ * Supplementary group IDs
+ */
+#include <linux/cred.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <asm/uaccess.h>
+
+/* init to 2 - one for init_task, one to ensure it is never freed */
+struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+
+struct group_info *groups_alloc(int gidsetsize)
+{
+ struct group_info *group_info;
+ int nblocks;
+ int i;
+
+ nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
+ /* Make sure we always allocate at least one indirect block pointer */
+ nblocks = nblocks ? : 1;
+ group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
+ if (!group_info)
+ return NULL;
+ group_info->ngroups = gidsetsize;
+ group_info->nblocks = nblocks;
+ atomic_set(&group_info->usage, 1);
+
+ if (gidsetsize <= NGROUPS_SMALL)
+ group_info->blocks[0] = group_info->small_block;
+ else {
+ for (i = 0; i < nblocks; i++) {
+ gid_t *b;
+ b = (void *)__get_free_page(GFP_USER);
+ if (!b)
+ goto out_undo_partial_alloc;
+ group_info->blocks[i] = b;
+ }
+ }
+ return group_info;
+
+out_undo_partial_alloc:
+ while (--i >= 0) {
+ free_page((unsigned long)group_info->blocks[i]);
+ }
+ kfree(group_info);
+ return NULL;
+}
+
+EXPORT_SYMBOL(groups_alloc);
+
+void groups_free(struct group_info *group_info)
+{
+ if (group_info->blocks[0] != group_info->small_block) {
+ int i;
+ for (i = 0; i < group_info->nblocks; i++)
+ free_page((unsigned long)group_info->blocks[i]);
+ }
+ kfree(group_info);
+}
+
+EXPORT_SYMBOL(groups_free);
+
+/* export the group_info to a user-space array */
+static int groups_to_user(gid_t __user *grouplist,
+ const struct group_info *group_info)
+{
+ int i;
+ unsigned int count = group_info->ngroups;
+
+ for (i = 0; i < group_info->nblocks; i++) {
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
+
+ if (copy_to_user(grouplist, group_info->blocks[i], len))
+ return -EFAULT;
+
+ grouplist += NGROUPS_PER_BLOCK;
+ count -= cp_count;
+ }
+ return 0;
+}
+
+/* fill a group_info from a user-space array - it must be allocated already */
+static int groups_from_user(struct group_info *group_info,
+ gid_t __user *grouplist)
+{
+ int i;
+ unsigned int count = group_info->ngroups;
+
+ for (i = 0; i < group_info->nblocks; i++) {
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
+
+ if (copy_from_user(group_info->blocks[i], grouplist, len))
+ return -EFAULT;
+
+ grouplist += NGROUPS_PER_BLOCK;
+ count -= cp_count;
+ }
+ return 0;
+}
+
+/* a simple Shell sort */
+static void groups_sort(struct group_info *group_info)
+{
+ int base, max, stride;
+ int gidsetsize = group_info->ngroups;
+
+ for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
+ ; /* nothing */
+ stride /= 3;
+
+ while (stride) {
+ max = gidsetsize - stride;
+ for (base = 0; base < max; base++) {
+ int left = base;
+ int right = left + stride;
+ gid_t tmp = GROUP_AT(group_info, right);
+
+ while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
+ GROUP_AT(group_info, right) =
+ GROUP_AT(group_info, left);
+ right = left;
+ left -= stride;
+ }
+ GROUP_AT(group_info, right) = tmp;
+ }
+ stride /= 3;
+ }
+}
+
+/* a simple bsearch */
+int groups_search(const struct group_info *group_info, gid_t grp)
+{
+ unsigned int left, right;
+
+ if (!group_info)
+ return 0;
+
+ left = 0;
+ right = group_info->ngroups;
+ while (left < right) {
+ unsigned int mid = (left+right)/2;
+ int cmp = grp - GROUP_AT(group_info, mid);
+ if (cmp > 0)
+ left = mid + 1;
+ else if (cmp < 0)
+ right = mid;
+ else
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * set_groups - Change a group subscription in a set of credentials
+ * @new: The newly prepared set of credentials to alter
+ * @group_info: The group list to install
+ *
+ * Validate a group subscription and, if valid, insert it into a set
+ * of credentials.
+ */
+int set_groups(struct cred *new, struct group_info *group_info)
+{
+ int retval;
+
+ retval = security_task_setgroups(group_info);
+ if (retval)
+ return retval;
+
+ put_group_info(new->group_info);
+ groups_sort(group_info);
+ get_group_info(group_info);
+ new->group_info = group_info;
+ return 0;
+}
+
+EXPORT_SYMBOL(set_groups);
+
+/**
+ * set_current_groups - Change current's group subscription
+ * @group_info: The group list to impose
+ *
+ * Validate a group subscription and, if valid, impose it upon current's task
+ * security record.
+ */
+int set_current_groups(struct group_info *group_info)
+{
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ ret = set_groups(new, group_info);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+}
+
+EXPORT_SYMBOL(set_current_groups);
+
+SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
+{
+ const struct cred *cred = current_cred();
+ int i;
+
+ if (gidsetsize < 0)
+ return -EINVAL;
+
+ /* no need to grab task_lock here; it cannot change */
+ i = cred->group_info->ngroups;
+ if (gidsetsize) {
+ if (i > gidsetsize) {
+ i = -EINVAL;
+ goto out;
+ }
+ if (groups_to_user(grouplist, cred->group_info)) {
+ i = -EFAULT;
+ goto out;
+ }
+ }
+out:
+ return i;
+}
+
+/*
+ * SMP: Our groups are copy-on-write. We can set them safely
+ * without another task interfering.
+ */
+
+SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+{
+ struct group_info *group_info;
+ int retval;
+
+ if (!capable(CAP_SETGID))
+ return -EPERM;
+ if ((unsigned)gidsetsize > NGROUPS_MAX)
+ return -EINVAL;
+
+ group_info = groups_alloc(gidsetsize);
+ if (!group_info)
+ return -ENOMEM;
+ retval = groups_from_user(group_info, grouplist);
+ if (retval) {
+ put_group_info(group_info);
+ return retval;
+ }
+
+ retval = set_current_groups(group_info);
+ put_group_info(group_info);
+
+ return retval;
+}
+
+/*
+ * Check whether we're fsgid/egid or in the supplemental group.
+ */
+int in_group_p(gid_t grp)
+{
+ const struct cred *cred = current_cred();
+ int retval = 1;
+
+ if (grp != cred->fsgid)
+ retval = groups_search(cred->group_info, grp);
+ return retval;
+}
+
+EXPORT_SYMBOL(in_group_p);
+
+int in_egroup_p(gid_t grp)
+{
+ const struct cred *cred = current_cred();
+ int retval = 1;
+
+ if (grp != cred->egid)
+ retval = groups_search(cred->group_info, grp);
+ return retval;
+}
+
+EXPORT_SYMBOL(in_egroup_p);
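
For reference, a minimal userspace sketch (not part of the patch) that exercises the getgroups() path now living in kernel/groups.c: pass gidsetsize == 0 to learn the count, then fetch the list.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

int main(void)
{
	int n = getgroups(0, NULL);	/* gidsetsize == 0: only report the count */
	gid_t *list;
	int i;

	if (n < 0)
		return 1;
	list = calloc(n ? n : 1, sizeof(*list));
	if (!list)
		return 1;
	if (getgroups(n, list) < 0) {	/* fails with EINVAL if the set grew */
		free(list);
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("supplementary gid: %u\n", (unsigned int)list[i]);
	free(list);
	return 0;
}
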
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cb8a15c19583..b675a67c9ac3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -43,6 +43,8 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
#include <asm/uaccess.h>
@@ -193,12 +195,24 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
* Switch the timer base to the current CPU when possible.
*/
static inline struct hrtimer_clock_base *
-switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
+switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
+ int pinned)
{
struct hrtimer_clock_base *new_base;
struct hrtimer_cpu_base *new_cpu_base;
+ int cpu, preferred_cpu = -1;
+
+ cpu = smp_processor_id();
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+ if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
+ preferred_cpu = get_nohz_load_balancer();
+ if (preferred_cpu >= 0)
+ cpu = preferred_cpu;
+ }
+#endif
- new_cpu_base = &__get_cpu_var(hrtimer_bases);
+again:
+ new_cpu_base = &per_cpu(hrtimer_bases, cpu);
new_base = &new_cpu_base->clock_base[base->index];
if (base != new_base) {
@@ -218,6 +232,40 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
timer->base = NULL;
spin_unlock(&base->cpu_base->lock);
spin_lock(&new_base->cpu_base->lock);
+
+ /* Optimized away for NOHZ=n SMP=n */
+ if (cpu == preferred_cpu) {
+ /* Calculate clock monotonic expiry time */
+#ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires = ktime_sub(hrtimer_get_expires(timer),
+ new_base->offset);
+#else
+ ktime_t expires = hrtimer_get_expires(timer);
+#endif
+
+ /*
+ * Get the next event on target cpu from the
+ * clock events layer.
+ * This covers the highres=off nohz=on case as well.
+ */
+ ktime_t next = clockevents_get_next_event(cpu);
+
+ ktime_t delta = ktime_sub(expires, next);
+
+ /*
+ * We do not migrate the timer when it is expiring
+ * before the next event on the target cpu because
+ * we cannot reprogram the target cpu hardware and
+ * we would cause it to fire late.
+ */
+ if (delta.tv64 < 0) {
+ cpu = smp_processor_id();
+ spin_unlock(&new_base->cpu_base->lock);
+ spin_lock(&base->cpu_base->lock);
+ timer->base = base;
+ goto again;
+ }
+ }
timer->base = new_base;
}
return new_base;
@@ -235,7 +283,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
return base;
}
-# define switch_hrtimer_base(t, b) (b)
+# define switch_hrtimer_base(t, b, p) (b)
#endif /* !CONFIG_SMP */
@@ -907,9 +955,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
ret = remove_hrtimer(timer, base);
/* Switch the timer base, if necessary: */
- new_base = switch_hrtimer_base(timer, base);
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
- if (mode == HRTIMER_MODE_REL) {
+ if (mode & HRTIMER_MODE_REL) {
tim = ktime_add_safe(tim, new_base->get_time());
/*
* CONFIG_TIME_LOW_RES is a temporary way for architectures
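
The change from "mode == HRTIMER_MODE_REL" to "mode & HRTIMER_MODE_REL" matters because pinning now rides along as an extra bit OR'ed into the mode, so a plain equality test would no longer recognize a relative pinned timer. A standalone sketch of that distinction, using made-up values in place of the real enum:

/* Hypothetical values; the point is only that PINNED is a flag combined
 * with ABS/REL rather than a third standalone mode. */
enum sketch_hrtimer_mode {
	SKETCH_MODE_ABS		= 0x0,
	SKETCH_MODE_REL		= 0x1,
	SKETCH_MODE_PINNED	= 0x2,
};

static int sketch_is_relative(unsigned int mode)
{
	/* "mode == SKETCH_MODE_REL" would miss SKETCH_MODE_REL | SKETCH_MODE_PINNED */
	return mode & SKETCH_MODE_REL;
}
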
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 3394f8f52964..7d047808419d 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,5 +3,5 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
-obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c687ba4363f2..13c68e71b726 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,7 +359,6 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
spin_lock(&desc->lock);
mask_ack_irq(desc, irq);
- desc = irq_remap_to_desc(irq, desc);
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
@@ -438,7 +437,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_INPROGRESS;
out:
desc->chip->eoi(irq);
- desc = irq_remap_to_desc(irq, desc);
spin_unlock(&desc->lock);
}
@@ -475,7 +473,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
!desc->action)) {
desc->status |= (IRQ_PENDING | IRQ_MASKED);
mask_ack_irq(desc, irq);
- desc = irq_remap_to_desc(irq, desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(irq, desc);
@@ -483,7 +480,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
/* Start handling the irq */
if (desc->chip->ack)
desc->chip->ack(irq);
- desc = irq_remap_to_desc(irq, desc);
/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;
@@ -544,10 +540,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
- if (desc->chip->eoi) {
+ if (desc->chip->eoi)
desc->chip->eoi(irq);
- desc = irq_remap_to_desc(irq, desc);
- }
}
void
@@ -582,10 +576,8 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
/* Uninstall? */
if (handle == handle_bad_irq) {
- if (desc->chip != &no_irq_chip) {
+ if (desc->chip != &no_irq_chip)
mask_ack_irq(desc, irq);
- desc = irq_remap_to_desc(irq, desc);
- }
desc->status |= IRQ_DISABLED;
desc->depth = 1;
}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 5dd2572993cf..065205bdd920 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@
*/
#include <linux/irq.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
@@ -44,7 +45,7 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
- alloc_bootmem_cpumask_var(&irq_default_affinity);
+ alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
cpumask_setall(irq_default_affinity);
}
#else
@@ -81,45 +82,48 @@ static struct irq_desc irq_desc_init = {
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};
-void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
- int node;
void *ptr;
- node = cpu_to_node(cpu);
- ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
+ if (slab_is_available())
+ ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+ GFP_ATOMIC, node);
+ else
+ ptr = alloc_bootmem_node(NODE_DATA(node),
+ nr * sizeof(*desc->kstat_irqs));
/*
* don't overwite if can not get new one
* init_copy_kstat_irqs() could still use old one
*/
if (ptr) {
- printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
- cpu, node);
+ printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
desc->kstat_irqs = ptr;
}
}
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
spin_lock_init(&desc->lock);
desc->irq = irq;
#ifdef CONFIG_SMP
- desc->cpu = cpu;
+ desc->node = node;
#endif
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
- init_kstat_irqs(desc, cpu, nr_cpu_ids);
+ init_kstat_irqs(desc, node, nr_cpu_ids);
if (!desc->kstat_irqs) {
printk(KERN_ERR "can not alloc kstat_irqs\n");
BUG_ON(1);
}
- if (!init_alloc_desc_masks(desc, cpu, false)) {
+ if (!alloc_desc_masks(desc, node, false)) {
printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
BUG_ON(1);
}
- arch_init_chip_data(desc, cpu);
+ init_desc_masks(desc);
+ arch_init_chip_data(desc, node);
}
/*
@@ -146,6 +150,7 @@ int __init early_irq_init(void)
{
struct irq_desc *desc;
int legacy_count;
+ int node;
int i;
init_irq_default_affinity();
@@ -156,20 +161,21 @@ int __init early_irq_init(void)
desc = irq_desc_legacy;
legacy_count = ARRAY_SIZE(irq_desc_legacy);
+ node = first_online_node;
/* allocate irq_desc_ptrs array based on nr_irqs */
- irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+ irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
/* allocate based on nr_cpu_ids */
- /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
- kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
- sizeof(int));
+ kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
+ sizeof(int), GFP_NOWAIT, node);
for (i = 0; i < legacy_count; i++) {
desc[i].irq = i;
desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
- init_alloc_desc_masks(&desc[i], 0, true);
+ alloc_desc_masks(&desc[i], node, true);
+ init_desc_masks(&desc[i]);
irq_desc_ptrs[i] = desc + i;
}
@@ -187,11 +193,10 @@ struct irq_desc *irq_to_desc(unsigned int irq)
return NULL;
}
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
struct irq_desc *desc;
unsigned long flags;
- int node;
if (irq >= nr_irqs) {
WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
@@ -210,15 +215,17 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
if (desc)
goto out_unlock;
- node = cpu_to_node(cpu);
- desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
- printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
- irq, cpu, node);
+ if (slab_is_available())
+ desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+ else
+ desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+
+ printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
if (!desc) {
printk(KERN_ERR "can not alloc irq_desc\n");
BUG_ON(1);
}
- init_one_irq_desc(irq, desc, cpu);
+ init_one_irq_desc(irq, desc, node);
irq_desc_ptrs[irq] = desc;
@@ -256,7 +263,8 @@ int __init early_irq_init(void)
for (i = 0; i < count; i++) {
desc[i].irq = i;
- init_alloc_desc_masks(&desc[i], 0, true);
+ alloc_desc_masks(&desc[i], 0, true);
+ init_desc_masks(&desc[i]);
desc[i].kstat_irqs = kstat_irqs_all[i];
}
return arch_early_irq_init();
@@ -267,7 +275,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
return irq_to_desc(irq);
}
@@ -450,11 +458,8 @@ unsigned int __do_IRQ(unsigned int irq)
/*
* No locking required for CPU-local interrupts:
*/
- if (desc->chip->ack) {
+ if (desc->chip->ack)
desc->chip->ack(irq);
- /* get new one */
- desc = irq_remap_to_desc(irq, desc);
- }
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
if (!noirqdebug)
@@ -465,10 +470,8 @@ unsigned int __do_IRQ(unsigned int irq)
}
spin_lock(&desc->lock);
- if (desc->chip->ack) {
+ if (desc->chip->ack)
desc->chip->ack(irq);
- desc = irq_remap_to_desc(irq, desc);
- }
/*
* REPLAY is when Linux resends an IRQ that was dropped earlier
* WAITING is used by probe to mark irqs that are being tested
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 01ce20eab38f..73468253143b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,7 @@ extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
extern struct lock_class_key irq_desc_lock_class;
-extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
extern spinlock_t sparse_irq_lock;
@@ -42,6 +42,9 @@ static inline void unregister_handler_proc(unsigned int irq,
extern int irq_select_affinity_usr(unsigned int irq);
+extern void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+
/*
* Debugging printout:
*/
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2734eca59243..aaf5c9d05770 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,7 +80,7 @@ int irq_can_set_affinity(unsigned int irq)
return 1;
}
-static void
+void
irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
{
struct irqaction *action = desc->action;
@@ -109,17 +109,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
spin_lock_irqsave(&desc->lock, flags);
#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (desc->status & IRQ_MOVE_PCNTXT)
- desc->chip->set_affinity(irq, cpumask);
+ if (desc->status & IRQ_MOVE_PCNTXT) {
+ if (!desc->chip->set_affinity(irq, cpumask)) {
+ cpumask_copy(desc->affinity, cpumask);
+ irq_set_thread_affinity(desc, cpumask);
+ }
+ }
else {
desc->status |= IRQ_MOVE_PENDING;
cpumask_copy(desc->pending_mask, cpumask);
}
#else
- cpumask_copy(desc->affinity, cpumask);
- desc->chip->set_affinity(irq, cpumask);
+ if (!desc->chip->set_affinity(irq, cpumask)) {
+ cpumask_copy(desc->affinity, cpumask);
+ irq_set_thread_affinity(desc, cpumask);
+ }
#endif
- irq_set_thread_affinity(desc, cpumask);
desc->status |= IRQ_AFFINITY_SET;
spin_unlock_irqrestore(&desc->lock, flags);
return 0;
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index e05ad9be43b7..cfe767ca1545 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,5 +1,8 @@
#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
void move_masked_irq(int irq)
{
@@ -39,11 +42,12 @@ void move_masked_irq(int irq)
* masking the irqs.
*/
if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
- < nr_cpu_ids)) {
- cpumask_and(desc->affinity,
- desc->pending_mask, cpu_online_mask);
- desc->chip->set_affinity(irq, desc->affinity);
- }
+ < nr_cpu_ids))
+ if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+ cpumask_copy(desc->affinity, desc->pending_mask);
+ irq_set_thread_affinity(desc, desc->pending_mask);
+ }
+
cpumask_clear(desc->pending_mask);
}
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 44bbdcbaf8d2..2f69bee57bf2 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -15,9 +15,9 @@
static void init_copy_kstat_irqs(struct irq_desc *old_desc,
struct irq_desc *desc,
- int cpu, int nr)
+ int node, int nr)
{
- init_kstat_irqs(desc, cpu, nr);
+ init_kstat_irqs(desc, node, nr);
if (desc->kstat_irqs != old_desc->kstat_irqs)
memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
@@ -34,20 +34,20 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
}
static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
- struct irq_desc *desc, int cpu)
+ struct irq_desc *desc, int node)
{
memcpy(desc, old_desc, sizeof(struct irq_desc));
- if (!init_alloc_desc_masks(desc, cpu, false)) {
+ if (!alloc_desc_masks(desc, node, false)) {
printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
"for migration.\n", irq);
return false;
}
spin_lock_init(&desc->lock);
- desc->cpu = cpu;
+ desc->node = node;
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
- init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+ init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
init_copy_desc_masks(old_desc, desc);
- arch_init_copy_chip_data(old_desc, desc, cpu);
+ arch_init_copy_chip_data(old_desc, desc, node);
return true;
}
@@ -59,12 +59,11 @@ static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
}
static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
- int cpu)
+ int node)
{
struct irq_desc *desc;
unsigned int irq;
unsigned long flags;
- int node;
irq = old_desc->irq;
@@ -76,7 +75,6 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
if (desc && old_desc != desc)
goto out_unlock;
- node = cpu_to_node(cpu);
desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
if (!desc) {
printk(KERN_ERR "irq %d: can not get new irq_desc "
@@ -85,7 +83,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
desc = old_desc;
goto out_unlock;
}
- if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
+ if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
/* still use old one */
kfree(desc);
desc = old_desc;
@@ -97,9 +95,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
/* free the old one */
free_one_irq_desc(old_desc, desc);
- spin_unlock(&old_desc->lock);
kfree(old_desc);
- spin_lock(&desc->lock);
return desc;
@@ -109,24 +105,14 @@ out_unlock:
return desc;
}
-struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
+struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
- int old_cpu;
- int node, old_node;
-
/* those all static, do move them */
if (desc->irq < NR_IRQS_LEGACY)
return desc;
- old_cpu = desc->cpu;
- if (old_cpu != cpu) {
- node = cpu_to_node(cpu);
- old_node = cpu_to_node(old_cpu);
- if (old_node != node)
- desc = __real_move_irq_desc(desc, cpu);
- else
- desc->cpu = cpu;
- }
+ if (desc->node != node)
+ desc = __real_move_irq_desc(desc, node);
return desc;
}
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 374faf9bfdc7..3a29dbe7898e 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,12 +30,16 @@
#define all_var 0
#endif
-/* These will be re-linked against their real values during the second link stage */
+/*
+ * These will be re-linked against their real values
+ * during the second link stage.
+ */
extern const unsigned long kallsyms_addresses[] __attribute__((weak));
extern const u8 kallsyms_names[] __attribute__((weak));
-/* tell the compiler that the count isn't in the small data section if the arch
- * has one (eg: FRV)
+/*
+ * Tell the compiler that the count isn't in the small data section if the arch
+ * has one (eg: FRV).
*/
extern const unsigned long kallsyms_num_syms
__attribute__((weak, section(".rodata")));
@@ -75,31 +79,37 @@ static int is_ksym_addr(unsigned long addr)
return is_kernel_text(addr) || is_kernel_inittext(addr);
}
-/* expand a compressed symbol data into the resulting uncompressed string,
- given the offset to where the symbol is in the compressed stream */
+/*
+ * Expand a compressed symbol data into the resulting uncompressed string,
+ * given the offset to where the symbol is in the compressed stream.
+ */
static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
{
int len, skipped_first = 0;
const u8 *tptr, *data;
- /* get the compressed symbol length from the first symbol byte */
+ /* Get the compressed symbol length from the first symbol byte. */
data = &kallsyms_names[off];
len = *data;
data++;
- /* update the offset to return the offset for the next symbol on
- * the compressed stream */
+ /*
+ * Update the offset to return the offset for the next symbol on
+ * the compressed stream.
+ */
off += len + 1;
- /* for every byte on the compressed symbol data, copy the table
- entry for that byte */
- while(len) {
- tptr = &kallsyms_token_table[ kallsyms_token_index[*data] ];
+ /*
+ * For every byte on the compressed symbol data, copy the table
+ * entry for that byte.
+ */
+ while (len) {
+ tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
data++;
len--;
while (*tptr) {
- if(skipped_first) {
+ if (skipped_first) {
*result = *tptr;
result++;
} else
@@ -110,36 +120,46 @@ static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
*result = '\0';
- /* return to offset to the next symbol */
+ /* Return to offset to the next symbol. */
return off;
}
-/* get symbol type information. This is encoded as a single char at the
- * begining of the symbol name */
+/*
+ * Get symbol type information. This is encoded as a single char at the
+ * beginning of the symbol name.
+ */
static char kallsyms_get_symbol_type(unsigned int off)
{
- /* get just the first code, look it up in the token table, and return the
- * first char from this token */
- return kallsyms_token_table[ kallsyms_token_index[ kallsyms_names[off+1] ] ];
+ /*
+ * Get just the first code, look it up in the token table,
+ * and return the first char from this token.
+ */
+ return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
}
-/* find the offset on the compressed stream given and index in the
- * kallsyms array */
+/*
+ * Find the offset on the compressed stream given and index in the
+ * kallsyms array.
+ */
static unsigned int get_symbol_offset(unsigned long pos)
{
const u8 *name;
int i;
- /* use the closest marker we have. We have markers every 256 positions,
- * so that should be close enough */
- name = &kallsyms_names[ kallsyms_markers[pos>>8] ];
+ /*
+ * Use the closest marker we have. We have markers every 256 positions,
+ * so that should be close enough.
+ */
+ name = &kallsyms_names[kallsyms_markers[pos >> 8]];
- /* sequentially scan all the symbols up to the point we're searching for.
- * Every symbol is stored in a [<len>][<len> bytes of data] format, so we
- * just need to add the len to the current pointer for every symbol we
- * wish to skip */
- for(i = 0; i < (pos&0xFF); i++)
+ /*
+ * Sequentially scan all the symbols up to the point we're searching
+ * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
+ * so we just need to add the len to the current pointer for every
+ * symbol we wish to skip.
+ */
+ for (i = 0; i < (pos & 0xFF); i++)
name = name + (*name) + 1;
return name - kallsyms_names;
@@ -190,7 +210,7 @@ static unsigned long get_symbol_pos(unsigned long addr,
/* This kernel should never had been booted. */
BUG_ON(!kallsyms_addresses);
- /* do a binary search on the sorted kallsyms_addresses array */
+ /* Do a binary search on the sorted kallsyms_addresses array. */
low = 0;
high = kallsyms_num_syms;
@@ -203,15 +223,15 @@ static unsigned long get_symbol_pos(unsigned long addr,
}
/*
- * search for the first aliased symbol. Aliased
- * symbols are symbols with the same address
+ * Search for the first aliased symbol. Aliased
+ * symbols are symbols with the same address.
*/
while (low && kallsyms_addresses[low-1] == kallsyms_addresses[low])
--low;
symbol_start = kallsyms_addresses[low];
- /* Search for next non-aliased symbol */
+ /* Search for next non-aliased symbol. */
for (i = low + 1; i < kallsyms_num_syms; i++) {
if (kallsyms_addresses[i] > symbol_start) {
symbol_end = kallsyms_addresses[i];
@@ -219,7 +239,7 @@ static unsigned long get_symbol_pos(unsigned long addr,
}
}
- /* if we found no next symbol, we use the end of the section */
+ /* If we found no next symbol, we use the end of the section. */
if (!symbol_end) {
if (is_kernel_inittext(addr))
symbol_end = (unsigned long)_einittext;
@@ -252,10 +272,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
/*
* Lookup an address
- * - modname is set to NULL if it's in the kernel
- * - we guarantee that the returned name is valid until we reschedule even if
- * it resides in a module
- * - we also guarantee that modname will be valid until rescheduled
+ * - modname is set to NULL if it's in the kernel.
+ * - We guarantee that the returned name is valid until we reschedule even if
+ *   it resides in a module.
+ * - We also guarantee that modname will be valid until rescheduled.
*/
const char *kallsyms_lookup(unsigned long addr,
unsigned long *symbolsize,
@@ -276,7 +296,7 @@ const char *kallsyms_lookup(unsigned long addr,
return namebuf;
}
- /* see if it's in a module */
+ /* See if it's in a module. */
return module_address_lookup(addr, symbolsize, offset, modname,
namebuf);
}
@@ -294,7 +314,7 @@ int lookup_symbol_name(unsigned long addr, char *symname)
kallsyms_expand_symbol(get_symbol_offset(pos), symname);
return 0;
}
- /* see if it's in a module */
+ /* See if it's in a module. */
return lookup_module_symbol_name(addr, symname);
}
@@ -313,7 +333,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
modname[0] = '\0';
return 0;
}
- /* see if it's in a module */
+ /* See if it's in a module. */
return lookup_module_symbol_attrs(addr, size, offset, modname, name);
}
@@ -342,6 +362,7 @@ int sprint_symbol(char *buffer, unsigned long address)
return len;
}
+EXPORT_SYMBOL_GPL(sprint_symbol);
/* Look up a kernel symbol and print it to the kernel messages. */
void __print_symbol(const char *fmt, unsigned long address)
@@ -352,13 +373,13 @@ void __print_symbol(const char *fmt, unsigned long address)
printk(fmt, buffer);
}
+EXPORT_SYMBOL(__print_symbol);
/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
-struct kallsym_iter
-{
+struct kallsym_iter {
loff_t pos;
unsigned long value;
- unsigned int nameoff; /* If iterating in core kernel symbols */
+ unsigned int nameoff; /* If iterating in core kernel symbols. */
char type;
char name[KSYM_NAME_LEN];
char module_name[MODULE_NAME_LEN];
@@ -404,7 +425,7 @@ static int update_iter(struct kallsym_iter *iter, loff_t pos)
iter->pos = pos;
return get_ksymbol_mod(iter);
}
-
+
/* If we're not on the desired position, reset to new position. */
if (pos != iter->pos)
reset_iter(iter, pos);
@@ -439,23 +460,25 @@ static int s_show(struct seq_file *m, void *p)
{
struct kallsym_iter *iter = m->private;
- /* Some debugging symbols have no name. Ignore them. */
+ /* Some debugging symbols have no name. Ignore them. */
if (!iter->name[0])
return 0;
if (iter->module_name[0]) {
char type;
- /* Label it "global" if it is exported,
- * "local" if not exported. */
+ /*
+ * Label it "global" if it is exported,
+ * "local" if not exported.
+ */
type = iter->exported ? toupper(iter->type) :
tolower(iter->type);
seq_printf(m, "%0*lx %c %s\t[%s]\n",
- (int)(2*sizeof(void*)),
+ (int)(2 * sizeof(void *)),
iter->value, type, iter->name, iter->module_name);
} else
seq_printf(m, "%0*lx %c %s\n",
- (int)(2*sizeof(void*)),
+ (int)(2 * sizeof(void *)),
iter->value, iter->type, iter->name);
return 0;
}
@@ -469,9 +492,11 @@ static const struct seq_operations kallsyms_op = {
static int kallsyms_open(struct inode *inode, struct file *file)
{
- /* We keep iterator in m->private, since normal case is to
+ /*
+ * We keep iterator in m->private, since normal case is to
* s_start from where we left off, so we avoid doing
- * using get_symbol_offset for every symbol */
+ * using get_symbol_offset for every symbol.
+ */
struct kallsym_iter *iter;
int ret;
@@ -500,7 +525,4 @@ static int __init kallsyms_init(void)
proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
return 0;
}
-__initcall(kallsyms_init);
-
-EXPORT_SYMBOL(__print_symbol);
-EXPORT_SYMBOL_GPL(sprint_symbol);
+device_initcall(kallsyms_init);
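
The lookup helpers above depend on the layout of the compressed name stream: each entry is a one-byte length followed by that many bytes of token-compressed data, and kallsyms_markers[] records the stream offset of every 256th symbol so at most 255 entries are ever skipped sequentially. A standalone restatement of get_symbol_offset() over hypothetical arrays:

static unsigned int sketch_symbol_offset(const unsigned char *names,
					 const unsigned long *markers,
					 unsigned long pos)
{
	/* Jump to the nearest marker, then walk the remaining (pos & 0xFF)
	 * length-prefixed entries one by one. */
	const unsigned char *name = &names[markers[pos >> 8]];
	unsigned long i;

	for (i = 0; i < (pos & 0xFF); i++)
		name = name + (*name) + 1;

	return name - names;
}
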
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 5a758c6e4950..ae1c35201cc8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1448,18 +1448,17 @@ int kernel_kexec(void)
goto Restore_console;
}
suspend_console();
- error = device_suspend(PMSG_FREEZE);
+ error = dpm_suspend_start(PMSG_FREEZE);
if (error)
goto Resume_console;
- device_pm_lock();
- /* At this point, device_suspend() has been called,
- * but *not* device_power_down(). We *must*
- * device_power_down() now. Otherwise, drivers for
+ /* At this point, dpm_suspend_start() has been called,
+ * but *not* dpm_suspend_noirq(). We *must* call
+ * dpm_suspend_noirq() now. Otherwise, drivers for
* some devices (e.g. interrupt controllers) become
* desynchronized with the actual state of the
* hardware at resume time, and evil weirdness ensues.
*/
- error = device_power_down(PMSG_FREEZE);
+ error = dpm_suspend_noirq(PMSG_FREEZE);
if (error)
goto Resume_devices;
error = disable_nonboot_cpus();
@@ -1487,10 +1486,9 @@ int kernel_kexec(void)
local_irq_enable();
Enable_cpus:
enable_nonboot_cpus();
- device_power_up(PMSG_RESTORE);
+ dpm_resume_noirq(PMSG_RESTORE);
Resume_devices:
- device_pm_unlock();
- device_resume(PMSG_RESTORE);
+ dpm_resume_end(PMSG_RESTORE);
Resume_console:
resume_console();
thaw_processes();
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index bc41ad0f24f8..26539e3228e5 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -72,9 +72,9 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
/*
* round up to the next power of 2, since our 'let the indices
- * wrap' tachnique works only in this case.
+ * wrap' technique works only in this case.
*/
- if (size & (size - 1)) {
+ if (!is_power_of_2(size)) {
BUG_ON(size > 0x80000000);
size = roundup_pow_of_two(size);
}
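
The kfifo cleanup relies on a standard identity: for a non-zero size, (size & (size - 1)) == 0 exactly when size is a power of two, which is what is_power_of_2() spells out. In isolation:

static inline int sketch_is_power_of_2(unsigned long n)
{
	/* A power of two has a single bit set, so n - 1 clears that bit
	 * and shares no bits with n. */
	return n != 0 && (n & (n - 1)) == 0;
}
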
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b750675251e5..7e95bedb2bfc 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -370,8 +370,10 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
sub_info->argv = argv;
sub_info->envp = envp;
sub_info->cred = prepare_usermodehelper_creds();
- if (!sub_info->cred)
+ if (!sub_info->cred) {
+ kfree(sub_info);
return NULL;
+ }
out:
return sub_info;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 41c88fe40500..7fa441333529 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -9,6 +9,7 @@
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
+#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
@@ -236,6 +237,7 @@ int kthreadd(void *unused)
ignore_signals(tsk);
set_user_nice(tsk, KTHREAD_NICE_LEVEL);
set_cpus_allowed_ptr(tsk, cpu_all_mask);
+ set_mems_allowed(node_possible_map);
current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index a2cc7e9a6e84..699a2ac3a0d7 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -54,9 +54,9 @@ enum {
* table (if it's not there yet), and we check it for lock order
* conflicts and deadlocks.
*/
-#define MAX_LOCKDEP_ENTRIES 8192UL
+#define MAX_LOCKDEP_ENTRIES 16384UL
-#define MAX_LOCKDEP_CHAINS_BITS 14
+#define MAX_LOCKDEP_CHAINS_BITS 15
#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
diff --git a/kernel/module.c b/kernel/module.c
index 2383e60fcf3f..215aaab09e91 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -53,6 +53,7 @@
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
+#include <linux/kmemleak.h>
#if 0
#define DEBUGP printk
@@ -73,6 +74,9 @@ DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
+/* Block module loading/unloading? */
+int modules_disabled = 0;
+
/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);
@@ -430,6 +434,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
unsigned long extra;
unsigned int i;
void *ptr;
+ int cpu;
if (align > PAGE_SIZE) {
printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
@@ -459,6 +464,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
if (!split_block(i, size))
return NULL;
+ /* add the per-cpu scanning areas */
+ for_each_possible_cpu(cpu)
+ kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
+ GFP_KERNEL);
+
/* Mark allocated */
pcpu_size[i] = -pcpu_size[i];
return ptr;
@@ -473,6 +483,7 @@ static void percpu_modfree(void *freeme)
{
unsigned int i;
void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
+ int cpu;
/* First entry is core kernel percpu data. */
for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -484,6 +495,10 @@ static void percpu_modfree(void *freeme)
BUG();
free:
+ /* remove the per-cpu scanning areas */
+ for_each_possible_cpu(cpu)
+ kmemleak_free(freeme + per_cpu_offset(cpu));
+
/* Merge with previous? */
if (pcpu_size[i-1] >= 0) {
pcpu_size[i-1] += pcpu_size[i];
@@ -778,7 +793,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
char name[MODULE_NAME_LEN];
int ret, forced = 0;
- if (!capable(CAP_SYS_MODULE))
+ if (!capable(CAP_SYS_MODULE) || modules_disabled)
return -EPERM;
if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
@@ -1876,6 +1891,36 @@ static void *module_alloc_update_bounds(unsigned long size)
return ret;
}
+#ifdef CONFIG_DEBUG_KMEMLEAK
+static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs, char *secstrings)
+{
+ unsigned int i;
+
+ /* only scan the sections containing data */
+ kmemleak_scan_area(mod->module_core, (unsigned long)mod -
+ (unsigned long)mod->module_core,
+ sizeof(struct module), GFP_KERNEL);
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+ if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0
+ && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
+ continue;
+
+ kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
+ (unsigned long)mod->module_core,
+ sechdrs[i].sh_size, GFP_KERNEL);
+ }
+}
+#else
+static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs, char *secstrings)
+{
+}
+#endif
+
/* Allocate and load the module: note that size of section 0 is always
zero, and we rely on this for optional sections. */
static noinline struct module *load_module(void __user *umod,
@@ -2046,6 +2091,12 @@ static noinline struct module *load_module(void __user *umod,
/* Do the allocs. */
ptr = module_alloc_update_bounds(mod->core_size);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. Just mark it as not being a
+ * leak.
+ */
+ kmemleak_not_leak(ptr);
if (!ptr) {
err = -ENOMEM;
goto free_percpu;
@@ -2054,6 +2105,13 @@ static noinline struct module *load_module(void __user *umod,
mod->module_core = ptr;
ptr = module_alloc_update_bounds(mod->init_size);
+ /*
+ * The pointer to this block is stored in the module structure
+ * which is inside the block. This block doesn't need to be
+ * scanned as it contains data and code that will be freed
+ * after the module is initialized.
+ */
+ kmemleak_ignore(ptr);
if (!ptr && mod->init_size) {
err = -ENOMEM;
goto free_core;
@@ -2084,6 +2142,7 @@ static noinline struct module *load_module(void __user *umod,
}
/* Module has been moved. */
mod = (void *)sechdrs[modindex].sh_addr;
+ kmemleak_load_module(mod, hdr, sechdrs, secstrings);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
@@ -2338,7 +2397,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
int ret = 0;
/* Must have permission */
- if (!capable(CAP_SYS_MODULE))
+ if (!capable(CAP_SYS_MODULE) || modules_disabled)
return -EPERM;
/* Only one module load at a time, please */
@@ -2396,6 +2455,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
mutex_lock(&module_mutex);
/* Drop initial reference. */
module_put(mod);
+ trim_init_extable(mod);
module_free(mod, mod->module_init);
mod->module_init = NULL;
mod->init_size = 0;
@@ -2839,7 +2899,7 @@ void print_modules(void)
struct module *mod;
char buf[8];
- printk("Modules linked in:");
+ printk(KERN_DEFAULT "Modules linked in:");
/* Most callers should already have preempt disabled, but make sure */
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list)
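
The kmemleak annotations added to kernel/module.c above all deal with one situation: an allocation whose only live reference is stored inside the allocation itself (the struct module sits inside the core block it describes), and only part of that block holds pointers worth scanning. A minimal sketch of the same pattern, assuming the 2009 kmemleak API used in this hunk (kmemleak_not_leak() and the four-argument kmemleak_scan_area()); the structure and function names below are made up for illustration:

#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kmemleak.h>

struct blob {
	struct blob *self;	/* the only pointer to this allocation */
	char payload[128];	/* bulk data, never holds pointers */
};

static struct blob *blob_create(void)
{
	struct blob *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return NULL;
	/* The only reference lives inside the object itself. */
	kmemleak_not_leak(b);
	/* Restrict scanning to the header; skip the pointer-free payload. */
	kmemleak_scan_area(b, 0, offsetof(struct blob, payload), GFP_KERNEL);
	b->self = b;
	return b;
}
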
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 507cf2b5e9f1..947b3ad551f8 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -89,7 +89,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
*
* This function is similar to (but not equivalent to) down().
*/
-void inline __sched mutex_lock(struct mutex *lock)
+void __sched mutex_lock(struct mutex *lock)
{
might_sleep();
/*
@@ -249,7 +249,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
/* didn't get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
- __schedule();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
spin_lock_mutex(&lock->wait_lock, flags);
}
@@ -471,5 +473,28 @@ int __sched mutex_trylock(struct mutex *lock)
return ret;
}
-
EXPORT_SYMBOL(mutex_trylock);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+ /* dec if we can't possibly hit 0 */
+ if (atomic_add_unless(cnt, -1, 1))
+ return 0;
+ /* we might hit 0, so take the lock */
+ mutex_lock(lock);
+ if (!atomic_dec_and_test(cnt)) {
+ /* when we actually did the dec, we didn't hit 0 */
+ mutex_unlock(lock);
+ return 0;
+ }
+ /* we hit 0, and we hold the lock */
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
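
atomic_dec_and_mutex_lock() above mirrors the long-standing atomic_dec_and_lock() helper, but for mutex-protected teardown: callers take the mutex only when the reference count might actually reach zero. A short usage sketch of that refcount-to-zero pattern; the structure, list and function names are illustrative, not from this patch:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct session {
	atomic_t refs;
	struct list_head node;
};

static DEFINE_MUTEX(session_lock);	/* protects the global session list */

static void session_put(struct session *s)
{
	/* Fast path: other references remain, no lock taken. */
	if (!atomic_dec_and_mutex_lock(&s->refs, &session_lock))
		return;

	/* Slow path: the count hit zero and session_lock is now held. */
	list_del(&s->node);
	mutex_unlock(&session_lock);
	kfree(s);
}
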
diff --git a/kernel/panic.c b/kernel/panic.c
index 874ecf1307ae..984b3ecbd72c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -340,39 +340,44 @@ void oops_exit(void)
}
#ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
-{
+struct slowpath_args {
+ const char *fmt;
va_list args;
- char function[KSYM_SYMBOL_LEN];
- unsigned long caller = (unsigned long)__builtin_return_address(0);
- const char *board;
+};
- sprint_symbol(function, caller);
+static void warn_slowpath_common(const char *file, int line, void *caller, struct slowpath_args *args)
+{
+ const char *board;
printk(KERN_WARNING "------------[ cut here ]------------\n");
- printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
- line, function);
+ printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
board = dmi_get_system_info(DMI_PRODUCT_NAME);
if (board)
printk(KERN_WARNING "Hardware name: %s\n", board);
- if (*fmt) {
- va_start(args, fmt);
- vprintk(fmt, args);
- va_end(args);
- }
+ if (args)
+ vprintk(args->fmt, args->args);
print_modules();
dump_stack();
print_oops_end_marker();
add_taint(TAINT_WARN);
}
+
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+{
+ struct slowpath_args args;
+
+ args.fmt = fmt;
+ va_start(args.args, fmt);
+ warn_slowpath_common(file, line, __builtin_return_address(0), &args);
+ va_end(args.args);
+}
EXPORT_SYMBOL(warn_slowpath_fmt);
void warn_slowpath_null(const char *file, int line)
{
- static const char *empty = "";
- warn_slowpath_fmt(file, line, empty);
+ warn_slowpath_common(file, line, __builtin_return_address(0), NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif
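
With the split above, both WARN() flavours funnel through warn_slowpath_common(): the format-string variant packs its va_list into struct slowpath_args, warn_slowpath_null() passes NULL and skips the vprintk() entirely, and the caller is printed via the %pS vsprintf extension rather than an explicit sprint_symbol() buffer. A hedged caller-side sketch of the two paths (names and limits are made up; where WANT_WARN_ON_SLOWPATH is in effect, WARN() and WARN_ON() reach these functions through the asm-generic/bug.h __WARN_printf()/__WARN() wrappers):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bug.h>

#define MAX_UNITS 4					/* illustrative limit */

static int hypothetical_probe(void) { return 0; }	/* stand-in */

static int __init hypothetical_init(void)
{
	int ret = hypothetical_probe();

	if (WARN(ret < 0, "probe failed: %d\n", ret))	/* -> warn_slowpath_fmt() */
		return ret;

	WARN_ON(ret > MAX_UNITS);			/* -> warn_slowpath_null() */
	return 0;
}
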
diff --git a/kernel/params.c b/kernel/params.c
index de273ec85bd2..7f6912ced2ba 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,9 +24,6 @@
#include <linux/err.h>
#include <linux/slab.h>
-/* We abuse the high bits of "perm" to record whether we kmalloc'ed. */
-#define KPARAM_KMALLOCED 0x80000000
-
#if 0
#define DEBUGP printk
#else
@@ -220,13 +217,13 @@ int param_set_charp(const char *val, struct kernel_param *kp)
return -ENOSPC;
}
- if (kp->perm & KPARAM_KMALLOCED)
+ if (kp->flags & KPARAM_KMALLOCED)
kfree(*(char **)kp->arg);
/* This is a hack. We can't strdup in early boot, and we
* don't need to; this mangled commandline is preserved. */
if (slab_is_available()) {
- kp->perm |= KPARAM_KMALLOCED;
+ kp->flags |= KPARAM_KMALLOCED;
*(char **)kp->arg = kstrdup(val, GFP_KERNEL);
if (!kp->arg)
return -ENOMEM;
@@ -241,44 +238,63 @@ int param_get_charp(char *buffer, struct kernel_param *kp)
return sprintf(buffer, "%s", *((char **)kp->arg));
}
+/* Actually could be a bool or an int, for historical reasons. */
int param_set_bool(const char *val, struct kernel_param *kp)
{
+ bool v;
+
/* No equals means "set"... */
if (!val) val = "1";
/* One of =[yYnN01] */
switch (val[0]) {
case 'y': case 'Y': case '1':
- *(int *)kp->arg = 1;
- return 0;
+ v = true;
+ break;
case 'n': case 'N': case '0':
- *(int *)kp->arg = 0;
- return 0;
+ v = false;
+ break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+
+ if (kp->flags & KPARAM_ISBOOL)
+ *(bool *)kp->arg = v;
+ else
+ *(int *)kp->arg = v;
+ return 0;
}
int param_get_bool(char *buffer, struct kernel_param *kp)
{
+ bool val;
+ if (kp->flags & KPARAM_ISBOOL)
+ val = *(bool *)kp->arg;
+ else
+ val = *(int *)kp->arg;
+
/* Y and N chosen as being relatively non-coder friendly */
- return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'Y' : 'N');
+ return sprintf(buffer, "%c", val ? 'Y' : 'N');
}
+/* This one must be bool. */
int param_set_invbool(const char *val, struct kernel_param *kp)
{
- int boolval, ret;
+ int ret;
+ bool boolval;
struct kernel_param dummy;
dummy.arg = &boolval;
+ dummy.flags = KPARAM_ISBOOL;
ret = param_set_bool(val, &dummy);
if (ret == 0)
- *(int *)kp->arg = !boolval;
+ *(bool *)kp->arg = !boolval;
return ret;
}
int param_get_invbool(char *buffer, struct kernel_param *kp)
{
- return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'N' : 'Y');
+ return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');
}
/* We break the rule and mangle the string. */
@@ -591,7 +607,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
unsigned int i;
for (i = 0; i < num; i++)
- if (params[i].perm & KPARAM_KMALLOCED)
+ if (params[i].flags & KPARAM_KMALLOCED)
kfree(*(char **)params[i].arg);
}
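
The new kp->flags/KPARAM_ISBOOL handling above lets a parameter declared with the "bool" type be backed by a real bool variable while int-backed boolean parameters keep working, and charp parameters now record their kmalloc state in kp->flags instead of abusing the high permission bits. A minimal module-side sketch with hypothetical parameter names:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool enable_feature = true;	/* may now be a real bool */
module_param(enable_feature, bool, 0644);
MODULE_PARM_DESC(enable_feature, "Enable the hypothetical feature (default: Y)");

static int legacy_flag;			/* int-backed bool params still work */
module_param(legacy_flag, bool, 0444);

static int __init demo_init(void)
{
	pr_info("enable_feature=%c legacy_flag=%c\n",
		enable_feature ? 'Y' : 'N', legacy_flag ? 'Y' : 'N');
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");
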
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
new file mode 100644
index 000000000000..29b685f551aa
--- /dev/null
+++ b/kernel/perf_counter.c
@@ -0,0 +1,4339 @@
+/*
+ * Performance counter core code
+ *
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstra@redhat.com>
+ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/sysfs.h>
+#include <linux/dcache.h>
+#include <linux/percpu.h>
+#include <linux/ptrace.h>
+#include <linux/vmstat.h>
+#include <linux/hardirq.h>
+#include <linux/rculist.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/anon_inodes.h>
+#include <linux/kernel_stat.h>
+#include <linux/perf_counter.h>
+
+#include <asm/irq_regs.h>
+
+/*
+ * Each CPU has a list of per CPU counters:
+ */
+DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
+
+int perf_max_counters __read_mostly = 1;
+static int perf_reserved_percpu __read_mostly;
+static int perf_overcommit __read_mostly = 1;
+
+static atomic_t nr_counters __read_mostly;
+static atomic_t nr_mmap_counters __read_mostly;
+static atomic_t nr_comm_counters __read_mostly;
+
+/*
+ * perf counter paranoia level:
+ * 0 - not paranoid
+ * 1 - disallow cpu counters to unpriv
+ * 2 - disallow kernel profiling to unpriv
+ */
+int sysctl_perf_counter_paranoid __read_mostly;
+
+static inline bool perf_paranoid_cpu(void)
+{
+ return sysctl_perf_counter_paranoid > 0;
+}
+
+static inline bool perf_paranoid_kernel(void)
+{
+ return sysctl_perf_counter_paranoid > 1;
+}
+
+int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
+
+/*
+ * max perf counter sample rate
+ */
+int sysctl_perf_counter_sample_rate __read_mostly = 100000;
+
+static atomic64_t perf_counter_id;
+
+/*
+ * Lock for (sysadmin-configurable) counter reservations:
+ */
+static DEFINE_SPINLOCK(perf_resource_lock);
+
+/*
+ * Architecture provided APIs - weak aliases:
+ */
+extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+{
+ return NULL;
+}
+
+void __weak hw_perf_disable(void) { barrier(); }
+void __weak hw_perf_enable(void) { barrier(); }
+
+void __weak hw_perf_counter_setup(int cpu) { barrier(); }
+
+int __weak
+hw_perf_group_sched_in(struct perf_counter *group_leader,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx, int cpu)
+{
+ return 0;
+}
+
+void __weak perf_counter_print_debug(void) { }
+
+static DEFINE_PER_CPU(int, disable_count);
+
+void __perf_disable(void)
+{
+ __get_cpu_var(disable_count)++;
+}
+
+bool __perf_enable(void)
+{
+ return !--__get_cpu_var(disable_count);
+}
+
+void perf_disable(void)
+{
+ __perf_disable();
+ hw_perf_disable();
+}
+
+void perf_enable(void)
+{
+ if (__perf_enable())
+ hw_perf_enable();
+}
+
+static void get_ctx(struct perf_counter_context *ctx)
+{
+ atomic_inc(&ctx->refcount);
+}
+
+static void free_ctx(struct rcu_head *head)
+{
+ struct perf_counter_context *ctx;
+
+ ctx = container_of(head, struct perf_counter_context, rcu_head);
+ kfree(ctx);
+}
+
+static void put_ctx(struct perf_counter_context *ctx)
+{
+ if (atomic_dec_and_test(&ctx->refcount)) {
+ if (ctx->parent_ctx)
+ put_ctx(ctx->parent_ctx);
+ if (ctx->task)
+ put_task_struct(ctx->task);
+ call_rcu(&ctx->rcu_head, free_ctx);
+ }
+}
+
+/*
+ * Get the perf_counter_context for a task and lock it.
+ * This has to cope with the fact that until it is locked,
+ * the context could get moved to another task.
+ */
+static struct perf_counter_context *
+perf_lock_task_context(struct task_struct *task, unsigned long *flags)
+{
+ struct perf_counter_context *ctx;
+
+ rcu_read_lock();
+ retry:
+ ctx = rcu_dereference(task->perf_counter_ctxp);
+ if (ctx) {
+ /*
+ * If this context is a clone of another, it might
+ * get swapped for another underneath us by
+ * perf_counter_task_sched_out, though the
+ * rcu_read_lock() protects us from any context
+ * getting freed. Lock the context and check if it
+ * got swapped before we could get the lock, and retry
+ * if so. If we locked the right context, then it
+ * can't get swapped on us any more.
+ */
+ spin_lock_irqsave(&ctx->lock, *flags);
+ if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
+ spin_unlock_irqrestore(&ctx->lock, *flags);
+ goto retry;
+ }
+ }
+ rcu_read_unlock();
+ return ctx;
+}
+
+/*
+ * Get the context for a task and increment its pin_count so it
+ * can't get swapped to another task. This also increments its
+ * reference count so that the context can't get freed.
+ */
+static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
+{
+ struct perf_counter_context *ctx;
+ unsigned long flags;
+
+ ctx = perf_lock_task_context(task, &flags);
+ if (ctx) {
+ ++ctx->pin_count;
+ get_ctx(ctx);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ }
+ return ctx;
+}
+
+static void perf_unpin_context(struct perf_counter_context *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ --ctx->pin_count;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ put_ctx(ctx);
+}
+
+/*
+ * Add a counter to the lists for its context.
+ * Must be called with ctx->mutex and ctx->lock held.
+ */
+static void
+list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+{
+ struct perf_counter *group_leader = counter->group_leader;
+
+ /*
+ * Depending on whether it is a standalone or sibling counter,
+ * add it straight to the context's counter list, or to the group
+ * leader's sibling list:
+ */
+ if (group_leader == counter)
+ list_add_tail(&counter->list_entry, &ctx->counter_list);
+ else {
+ list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+ group_leader->nr_siblings++;
+ }
+
+ list_add_rcu(&counter->event_entry, &ctx->event_list);
+ ctx->nr_counters++;
+}
+
+/*
+ * Remove a counter from the lists for its context.
+ * Must be called with ctx->mutex and ctx->lock held.
+ */
+static void
+list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
+{
+ struct perf_counter *sibling, *tmp;
+
+ if (list_empty(&counter->list_entry))
+ return;
+ ctx->nr_counters--;
+
+ list_del_init(&counter->list_entry);
+ list_del_rcu(&counter->event_entry);
+
+ if (counter->group_leader != counter)
+ counter->group_leader->nr_siblings--;
+
+ /*
+ * If this was a group counter with sibling counters then
+ * upgrade the siblings to singleton counters by adding them
+ * to the context list directly:
+ */
+ list_for_each_entry_safe(sibling, tmp,
+ &counter->sibling_list, list_entry) {
+
+ list_move_tail(&sibling->list_entry, &ctx->counter_list);
+ sibling->group_leader = sibling;
+ }
+}
+
+static void
+counter_sched_out(struct perf_counter *counter,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx)
+{
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ return;
+
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_stopped = ctx->time;
+ counter->pmu->disable(counter);
+ counter->oncpu = -1;
+
+ if (!is_software_counter(counter))
+ cpuctx->active_oncpu--;
+ ctx->nr_active--;
+ if (counter->attr.exclusive || !cpuctx->active_oncpu)
+ cpuctx->exclusive = 0;
+}
+
+static void
+group_sched_out(struct perf_counter *group_counter,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx)
+{
+ struct perf_counter *counter;
+
+ if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
+ return;
+
+ counter_sched_out(group_counter, cpuctx, ctx);
+
+ /*
+ * Schedule out siblings (if any):
+ */
+ list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+ counter_sched_out(counter, cpuctx, ctx);
+
+ if (group_counter->attr.exclusive)
+ cpuctx->exclusive = 0;
+}
+
+/*
+ * Cross CPU call to remove a performance counter
+ *
+ * We disable the counter on the hardware level first. After that we
+ * remove it from the context list.
+ */
+static void __perf_counter_remove_from_context(void *info)
+{
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+ struct perf_counter *counter = info;
+ struct perf_counter_context *ctx = counter->ctx;
+
+ /*
+ * If this is a task context, we need to check whether it is
+ * the current task context of this cpu. If not it has been
+ * scheduled out before the smp call arrived.
+ */
+ if (ctx->task && cpuctx->task_ctx != ctx)
+ return;
+
+ spin_lock(&ctx->lock);
+ /*
+ * Protect the list operation against NMI by disabling the
+ * counters on a global level.
+ */
+ perf_disable();
+
+ counter_sched_out(counter, cpuctx, ctx);
+
+ list_del_counter(counter, ctx);
+
+ if (!ctx->task) {
+ /*
+ * Allow more per task counters with respect to the
+ * reservation:
+ */
+ cpuctx->max_pertask =
+ min(perf_max_counters - ctx->nr_counters,
+ perf_max_counters - perf_reserved_percpu);
+ }
+
+ perf_enable();
+ spin_unlock(&ctx->lock);
+}
+
+
+/*
+ * Remove the counter from a task's (or a CPU's) list of counters.
+ *
+ * Must be called with ctx->mutex held.
+ *
+ * CPU counters are removed with a smp call. For task counters we only
+ * call when the task is on a CPU.
+ *
+ * If counter->ctx is a cloned context, callers must make sure that
+ * every task struct that counter->ctx->task could possibly point to
+ * remains valid. This is OK when called from perf_release since
+ * that only calls us on the top-level context, which can't be a clone.
+ * When called from perf_counter_exit_task, it's OK because the
+ * context has been detached from its task.
+ */
+static void perf_counter_remove_from_context(struct perf_counter *counter)
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ struct task_struct *task = ctx->task;
+
+ if (!task) {
+ /*
+ * Per cpu counters are removed via an smp call and
+ * the removal is always successful.
+ */
+ smp_call_function_single(counter->cpu,
+ __perf_counter_remove_from_context,
+ counter, 1);
+ return;
+ }
+
+retry:
+ task_oncpu_function_call(task, __perf_counter_remove_from_context,
+ counter);
+
+ spin_lock_irq(&ctx->lock);
+ /*
+ * If the context is active we need to retry the smp call.
+ */
+ if (ctx->nr_active && !list_empty(&counter->list_entry)) {
+ spin_unlock_irq(&ctx->lock);
+ goto retry;
+ }
+
+ /*
+ * The lock prevents this context from being scheduled in, so we
+ * can remove the counter safely if the call above did not
+ * succeed.
+ */
+ if (!list_empty(&counter->list_entry)) {
+ list_del_counter(counter, ctx);
+ }
+ spin_unlock_irq(&ctx->lock);
+}
+
+static inline u64 perf_clock(void)
+{
+ return cpu_clock(smp_processor_id());
+}
+
+/*
+ * Update the record of the current time in a context.
+ */
+static void update_context_time(struct perf_counter_context *ctx)
+{
+ u64 now = perf_clock();
+
+ ctx->time += now - ctx->timestamp;
+ ctx->timestamp = now;
+}
+
+/*
+ * Update the total_time_enabled and total_time_running fields for a counter.
+ */
+static void update_counter_times(struct perf_counter *counter)
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ u64 run_end;
+
+ if (counter->state < PERF_COUNTER_STATE_INACTIVE)
+ return;
+
+ counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
+
+ if (counter->state == PERF_COUNTER_STATE_INACTIVE)
+ run_end = counter->tstamp_stopped;
+ else
+ run_end = ctx->time;
+
+ counter->total_time_running = run_end - counter->tstamp_running;
+}
+
+/*
+ * Update total_time_enabled and total_time_running for all counters in a group.
+ */
+static void update_group_times(struct perf_counter *leader)
+{
+ struct perf_counter *counter;
+
+ update_counter_times(leader);
+ list_for_each_entry(counter, &leader->sibling_list, list_entry)
+ update_counter_times(counter);
+}
+
+/*
+ * Cross CPU call to disable a performance counter
+ */
+static void __perf_counter_disable(void *info)
+{
+ struct perf_counter *counter = info;
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+ struct perf_counter_context *ctx = counter->ctx;
+
+ /*
+ * If this is a per-task counter, need to check whether this
+ * counter's task is the current task on this cpu.
+ */
+ if (ctx->task && cpuctx->task_ctx != ctx)
+ return;
+
+ spin_lock(&ctx->lock);
+
+ /*
+ * If the counter is on, turn it off.
+ * If it is in error state, leave it in error state.
+ */
+ if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
+ update_context_time(ctx);
+ update_counter_times(counter);
+ if (counter == counter->group_leader)
+ group_sched_out(counter, cpuctx, ctx);
+ else
+ counter_sched_out(counter, cpuctx, ctx);
+ counter->state = PERF_COUNTER_STATE_OFF;
+ }
+
+ spin_unlock(&ctx->lock);
+}
+
+/*
+ * Disable a counter.
+ *
+ * If counter->ctx is a cloned context, callers must make sure that
+ * every task struct that counter->ctx->task could possibly point to
+ * remains valid. This condition is satisfied when called through
+ * perf_counter_for_each_child or perf_counter_for_each because they
+ * hold the top-level counter's child_mutex, so any descendant that
+ * goes to exit will block in sync_child_counter.
+ * When called from perf_pending_counter it's OK because counter->ctx
+ * is the current context on this CPU and preemption is disabled,
+ * hence we can't get into perf_counter_task_sched_out for this context.
+ */
+static void perf_counter_disable(struct perf_counter *counter)
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ struct task_struct *task = ctx->task;
+
+ if (!task) {
+ /*
+ * Disable the counter on the cpu that it's on
+ */
+ smp_call_function_single(counter->cpu, __perf_counter_disable,
+ counter, 1);
+ return;
+ }
+
+ retry:
+ task_oncpu_function_call(task, __perf_counter_disable, counter);
+
+ spin_lock_irq(&ctx->lock);
+ /*
+ * If the counter is still active, we need to retry the cross-call.
+ */
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
+ spin_unlock_irq(&ctx->lock);
+ goto retry;
+ }
+
+ /*
+ * Since we have the lock this context can't be scheduled
+ * in, so we can change the state safely.
+ */
+ if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
+ update_counter_times(counter);
+ counter->state = PERF_COUNTER_STATE_OFF;
+ }
+
+ spin_unlock_irq(&ctx->lock);
+}
+
+static int
+counter_sched_in(struct perf_counter *counter,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx,
+ int cpu)
+{
+ if (counter->state <= PERF_COUNTER_STATE_OFF)
+ return 0;
+
+ counter->state = PERF_COUNTER_STATE_ACTIVE;
+ counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
+ /*
+ * The new state must be visible before we turn it on in the hardware:
+ */
+ smp_wmb();
+
+ if (counter->pmu->enable(counter)) {
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->oncpu = -1;
+ return -EAGAIN;
+ }
+
+ counter->tstamp_running += ctx->time - counter->tstamp_stopped;
+
+ if (!is_software_counter(counter))
+ cpuctx->active_oncpu++;
+ ctx->nr_active++;
+
+ if (counter->attr.exclusive)
+ cpuctx->exclusive = 1;
+
+ return 0;
+}
+
+static int
+group_sched_in(struct perf_counter *group_counter,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx,
+ int cpu)
+{
+ struct perf_counter *counter, *partial_group;
+ int ret;
+
+ if (group_counter->state == PERF_COUNTER_STATE_OFF)
+ return 0;
+
+ ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+ if (ret)
+ return ret < 0 ? ret : 0;
+
+ if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+ return -EAGAIN;
+
+ /*
+ * Schedule in siblings as one group (if any):
+ */
+ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+ if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+ partial_group = counter;
+ goto group_error;
+ }
+ }
+
+ return 0;
+
+group_error:
+ /*
+ * Groups can be scheduled in as one unit only, so undo any
+ * partial group before returning:
+ */
+ list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+ if (counter == partial_group)
+ break;
+ counter_sched_out(counter, cpuctx, ctx);
+ }
+ counter_sched_out(group_counter, cpuctx, ctx);
+
+ return -EAGAIN;
+}
+
+/*
+ * Return 1 for a group consisting entirely of software counters,
+ * 0 if the group contains any hardware counters.
+ */
+static int is_software_only_group(struct perf_counter *leader)
+{
+ struct perf_counter *counter;
+
+ if (!is_software_counter(leader))
+ return 0;
+
+ list_for_each_entry(counter, &leader->sibling_list, list_entry)
+ if (!is_software_counter(counter))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Work out whether we can put this counter group on the CPU now.
+ */
+static int group_can_go_on(struct perf_counter *counter,
+ struct perf_cpu_context *cpuctx,
+ int can_add_hw)
+{
+ /*
+ * Groups consisting entirely of software counters can always go on.
+ */
+ if (is_software_only_group(counter))
+ return 1;
+ /*
+ * If an exclusive group is already on, no other hardware
+ * counters can go on.
+ */
+ if (cpuctx->exclusive)
+ return 0;
+ /*
+ * If this group is exclusive and there are already
+ * counters on the CPU, it can't go on.
+ */
+ if (counter->attr.exclusive && cpuctx->active_oncpu)
+ return 0;
+ /*
+ * Otherwise, try to add it if all previous groups were able
+ * to go on.
+ */
+ return can_add_hw;
+}
+
+static void add_counter_to_ctx(struct perf_counter *counter,
+ struct perf_counter_context *ctx)
+{
+ list_add_counter(counter, ctx);
+ counter->tstamp_enabled = ctx->time;
+ counter->tstamp_running = ctx->time;
+ counter->tstamp_stopped = ctx->time;
+}
+
+/*
+ * Cross CPU call to install and enable a performance counter
+ *
+ * Must be called with ctx->mutex held
+ */
+static void __perf_install_in_context(void *info)
+{
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+ struct perf_counter *counter = info;
+ struct perf_counter_context *ctx = counter->ctx;
+ struct perf_counter *leader = counter->group_leader;
+ int cpu = smp_processor_id();
+ int err;
+
+ /*
+ * If this is a task context, we need to check whether it is
+ * the current task context of this cpu. If not it has been
+ * scheduled out before the smp call arrived.
+ * Or possibly this is the right context but it isn't
+ * on this cpu because it had no counters.
+ */
+ if (ctx->task && cpuctx->task_ctx != ctx) {
+ if (cpuctx->task_ctx || ctx->task != current)
+ return;
+ cpuctx->task_ctx = ctx;
+ }
+
+ spin_lock(&ctx->lock);
+ ctx->is_active = 1;
+ update_context_time(ctx);
+
+ /*
+ * Protect the list operation against NMI by disabling the
+ * counters on a global level. NOP for non NMI based counters.
+ */
+ perf_disable();
+
+ add_counter_to_ctx(counter, ctx);
+
+ /*
+ * Don't put the counter on if it is disabled or if
+ * it is in a group and the group isn't on.
+ */
+ if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
+ (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
+ goto unlock;
+
+ /*
+ * An exclusive counter can't go on if there are already active
+ * hardware counters, and no hardware counter can go on if there
+ * is already an exclusive counter on.
+ */
+ if (!group_can_go_on(counter, cpuctx, 1))
+ err = -EEXIST;
+ else
+ err = counter_sched_in(counter, cpuctx, ctx, cpu);
+
+ if (err) {
+ /*
+ * This counter couldn't go on. If it is in a group
+ * then we have to pull the whole group off.
+ * If the counter group is pinned then put it in error state.
+ */
+ if (leader != counter)
+ group_sched_out(leader, cpuctx, ctx);
+ if (leader->attr.pinned) {
+ update_group_times(leader);
+ leader->state = PERF_COUNTER_STATE_ERROR;
+ }
+ }
+
+ if (!err && !ctx->task && cpuctx->max_pertask)
+ cpuctx->max_pertask--;
+
+ unlock:
+ perf_enable();
+
+ spin_unlock(&ctx->lock);
+}
+
+/*
+ * Attach a performance counter to a context
+ *
+ * First we add the counter to the list with the hardware enable bit
+ * in counter->hw_config cleared.
+ *
+ * If the counter is attached to a task which is on a CPU we use a smp
+ * call to enable it in the task context. The task might have been
+ * scheduled away, but we check this in the smp call again.
+ *
+ * Must be called with ctx->mutex held.
+ */
+static void
+perf_install_in_context(struct perf_counter_context *ctx,
+ struct perf_counter *counter,
+ int cpu)
+{
+ struct task_struct *task = ctx->task;
+
+ if (!task) {
+ /*
+ * Per cpu counters are installed via an smp call and
+ * the install is always successful.
+ */
+ smp_call_function_single(cpu, __perf_install_in_context,
+ counter, 1);
+ return;
+ }
+
+retry:
+ task_oncpu_function_call(task, __perf_install_in_context,
+ counter);
+
+ spin_lock_irq(&ctx->lock);
+ /*
+ * If the context is active we need to retry the smp call.
+ */
+ if (ctx->is_active && list_empty(&counter->list_entry)) {
+ spin_unlock_irq(&ctx->lock);
+ goto retry;
+ }
+
+ /*
+ * The lock prevents this context from being scheduled in, so we
+ * can add the counter safely if the call above did not
+ * succeed.
+ */
+ if (list_empty(&counter->list_entry))
+ add_counter_to_ctx(counter, ctx);
+ spin_unlock_irq(&ctx->lock);
+}
+
+/*
+ * Cross CPU call to enable a performance counter
+ */
+static void __perf_counter_enable(void *info)
+{
+ struct perf_counter *counter = info;
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+ struct perf_counter_context *ctx = counter->ctx;
+ struct perf_counter *leader = counter->group_leader;
+ int err;
+
+ /*
+ * If this is a per-task counter, need to check whether this
+ * counter's task is the current task on this cpu.
+ */
+ if (ctx->task && cpuctx->task_ctx != ctx) {
+ if (cpuctx->task_ctx || ctx->task != current)
+ return;
+ cpuctx->task_ctx = ctx;
+ }
+
+ spin_lock(&ctx->lock);
+ ctx->is_active = 1;
+ update_context_time(ctx);
+
+ if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ goto unlock;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+
+ /*
+ * If the counter is in a group and isn't the group leader,
+ * then don't put it on unless the group is on.
+ */
+ if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
+ goto unlock;
+
+ if (!group_can_go_on(counter, cpuctx, 1)) {
+ err = -EEXIST;
+ } else {
+ perf_disable();
+ if (counter == leader)
+ err = group_sched_in(counter, cpuctx, ctx,
+ smp_processor_id());
+ else
+ err = counter_sched_in(counter, cpuctx, ctx,
+ smp_processor_id());
+ perf_enable();
+ }
+
+ if (err) {
+ /*
+ * If this counter can't go on and it's part of a
+ * group, then the whole group has to come off.
+ */
+ if (leader != counter)
+ group_sched_out(leader, cpuctx, ctx);
+ if (leader->attr.pinned) {
+ update_group_times(leader);
+ leader->state = PERF_COUNTER_STATE_ERROR;
+ }
+ }
+
+ unlock:
+ spin_unlock(&ctx->lock);
+}
+
+/*
+ * Enable a counter.
+ *
+ * If counter->ctx is a cloned context, callers must make sure that
+ * every task struct that counter->ctx->task could possibly point to
+ * remains valid. This condition is satisfied when called through
+ * perf_counter_for_each_child or perf_counter_for_each as described
+ * for perf_counter_disable.
+ */
+static void perf_counter_enable(struct perf_counter *counter)
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ struct task_struct *task = ctx->task;
+
+ if (!task) {
+ /*
+ * Enable the counter on the cpu that it's on
+ */
+ smp_call_function_single(counter->cpu, __perf_counter_enable,
+ counter, 1);
+ return;
+ }
+
+ spin_lock_irq(&ctx->lock);
+ if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ goto out;
+
+ /*
+ * If the counter is in error state, clear that first.
+ * That way, if we see the counter in error state below, we
+ * know that it has gone back into error state, as distinct
+ * from the task having been scheduled away before the
+ * cross-call arrived.
+ */
+ if (counter->state == PERF_COUNTER_STATE_ERROR)
+ counter->state = PERF_COUNTER_STATE_OFF;
+
+ retry:
+ spin_unlock_irq(&ctx->lock);
+ task_oncpu_function_call(task, __perf_counter_enable, counter);
+
+ spin_lock_irq(&ctx->lock);
+
+ /*
+ * If the context is active and the counter is still off,
+ * we need to retry the cross-call.
+ */
+ if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
+ goto retry;
+
+ /*
+ * Since we have the lock this context can't be scheduled
+ * in, so we can change the state safely.
+ */
+ if (counter->state == PERF_COUNTER_STATE_OFF) {
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_enabled =
+ ctx->time - counter->total_time_enabled;
+ }
+ out:
+ spin_unlock_irq(&ctx->lock);
+}
+
+static int perf_counter_refresh(struct perf_counter *counter, int refresh)
+{
+ /*
+ * not supported on inherited counters
+ */
+ if (counter->attr.inherit)
+ return -EINVAL;
+
+ atomic_add(refresh, &counter->event_limit);
+ perf_counter_enable(counter);
+
+ return 0;
+}
+
+void __perf_counter_sched_out(struct perf_counter_context *ctx,
+ struct perf_cpu_context *cpuctx)
+{
+ struct perf_counter *counter;
+
+ spin_lock(&ctx->lock);
+ ctx->is_active = 0;
+ if (likely(!ctx->nr_counters))
+ goto out;
+ update_context_time(ctx);
+
+ perf_disable();
+ if (ctx->nr_active) {
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (counter != counter->group_leader)
+ counter_sched_out(counter, cpuctx, ctx);
+ else
+ group_sched_out(counter, cpuctx, ctx);
+ }
+ }
+ perf_enable();
+ out:
+ spin_unlock(&ctx->lock);
+}
+
+/*
+ * Test whether two contexts are equivalent, i.e. whether they
+ * have both been cloned from the same version of the same context
+ * and they both have the same number of enabled counters.
+ * If the number of enabled counters is the same, then the set
+ * of enabled counters should be the same, because these are both
+ * inherited contexts, therefore we can't access individual counters
+ * in them directly with an fd; we can only enable/disable all
+ * counters via prctl, or enable/disable all counters in a family
+ * via ioctl, which will have the same effect on both contexts.
+ */
+static int context_equiv(struct perf_counter_context *ctx1,
+ struct perf_counter_context *ctx2)
+{
+ return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
+ && ctx1->parent_gen == ctx2->parent_gen
+ && !ctx1->pin_count && !ctx2->pin_count;
+}
+
+/*
+ * Called from scheduler to remove the counters of the current task,
+ * with interrupts disabled.
+ *
+ * We stop each counter and update the counter value in counter->count.
+ *
+ * This does not protect us against NMI, but disable()
+ * sets the disabled bit in the control field of counter _before_
+ * accessing the counter control register. If a NMI hits, then it will
+ * not restart the counter.
+ */
+void perf_counter_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu)
+{
+ struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_counter_context *ctx = task->perf_counter_ctxp;
+ struct perf_counter_context *next_ctx;
+ struct perf_counter_context *parent;
+ struct pt_regs *regs;
+ int do_switch = 1;
+
+ regs = task_pt_regs(task);
+ perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+
+ if (likely(!ctx || !cpuctx->task_ctx))
+ return;
+
+ update_context_time(ctx);
+
+ rcu_read_lock();
+ parent = rcu_dereference(ctx->parent_ctx);
+ next_ctx = next->perf_counter_ctxp;
+ if (parent && next_ctx &&
+ rcu_dereference(next_ctx->parent_ctx) == parent) {
+ /*
+ * Looks like the two contexts are clones, so we might be
+ * able to optimize the context switch. We lock both
+ * contexts and check that they are clones under the
+ * lock (including re-checking that neither has been
+ * uncloned in the meantime). It doesn't matter which
+ * order we take the locks because no other cpu could
+ * be trying to lock both of these tasks.
+ */
+ spin_lock(&ctx->lock);
+ spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+ if (context_equiv(ctx, next_ctx)) {
+ /*
+ * XXX do we need a memory barrier of sorts
+ * wrt to rcu_dereference() of perf_counter_ctxp
+ */
+ task->perf_counter_ctxp = next_ctx;
+ next->perf_counter_ctxp = ctx;
+ ctx->task = next;
+ next_ctx->task = task;
+ do_switch = 0;
+ }
+ spin_unlock(&next_ctx->lock);
+ spin_unlock(&ctx->lock);
+ }
+ rcu_read_unlock();
+
+ if (do_switch) {
+ __perf_counter_sched_out(ctx, cpuctx);
+ cpuctx->task_ctx = NULL;
+ }
+}
+
+/*
+ * Called with IRQs disabled
+ */
+static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
+{
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+ if (!cpuctx->task_ctx)
+ return;
+
+ if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+ return;
+
+ __perf_counter_sched_out(ctx, cpuctx);
+ cpuctx->task_ctx = NULL;
+}
+
+/*
+ * Called with IRQs disabled
+ */
+static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
+{
+ __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
+}
+
+static void
+__perf_counter_sched_in(struct perf_counter_context *ctx,
+ struct perf_cpu_context *cpuctx, int cpu)
+{
+ struct perf_counter *counter;
+ int can_add_hw = 1;
+
+ spin_lock(&ctx->lock);
+ ctx->is_active = 1;
+ if (likely(!ctx->nr_counters))
+ goto out;
+
+ ctx->timestamp = perf_clock();
+
+ perf_disable();
+
+ /*
+ * First go through the list and put on any pinned groups
+ * in order to give them the best chance of going on.
+ */
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (counter->state <= PERF_COUNTER_STATE_OFF ||
+ !counter->attr.pinned)
+ continue;
+ if (counter->cpu != -1 && counter->cpu != cpu)
+ continue;
+
+ if (counter != counter->group_leader)
+ counter_sched_in(counter, cpuctx, ctx, cpu);
+ else {
+ if (group_can_go_on(counter, cpuctx, 1))
+ group_sched_in(counter, cpuctx, ctx, cpu);
+ }
+
+ /*
+ * If this pinned group hasn't been scheduled,
+ * put it in error state.
+ */
+ if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
+ update_group_times(counter);
+ counter->state = PERF_COUNTER_STATE_ERROR;
+ }
+ }
+
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ /*
+ * Ignore counters in OFF or ERROR state, and
+ * ignore pinned counters since we did them already.
+ */
+ if (counter->state <= PERF_COUNTER_STATE_OFF ||
+ counter->attr.pinned)
+ continue;
+
+ /*
+ * Listen to the 'cpu' scheduling filter constraint
+ * of counters:
+ */
+ if (counter->cpu != -1 && counter->cpu != cpu)
+ continue;
+
+ if (counter != counter->group_leader) {
+ if (counter_sched_in(counter, cpuctx, ctx, cpu))
+ can_add_hw = 0;
+ } else {
+ if (group_can_go_on(counter, cpuctx, can_add_hw)) {
+ if (group_sched_in(counter, cpuctx, ctx, cpu))
+ can_add_hw = 0;
+ }
+ }
+ }
+ perf_enable();
+ out:
+ spin_unlock(&ctx->lock);
+}
+
+/*
+ * Called from scheduler to add the counters of the current task
+ * with interrupts disabled.
+ *
+ * We restore the counter value and then enable it.
+ *
+ * This does not protect us against NMI, but enable()
+ * sets the enabled bit in the control field of counter _before_
+ * accessing the counter control register. If a NMI hits, then it will
+ * keep the counter running.
+ */
+void perf_counter_task_sched_in(struct task_struct *task, int cpu)
+{
+ struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_counter_context *ctx = task->perf_counter_ctxp;
+
+ if (likely(!ctx))
+ return;
+ if (cpuctx->task_ctx == ctx)
+ return;
+ __perf_counter_sched_in(ctx, cpuctx, cpu);
+ cpuctx->task_ctx = ctx;
+}
+
+static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+{
+ struct perf_counter_context *ctx = &cpuctx->ctx;
+
+ __perf_counter_sched_in(ctx, cpuctx, cpu);
+}
+
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_counter *counter, int enable);
+static void perf_log_period(struct perf_counter *counter, u64 period);
+
+static void perf_adjust_period(struct perf_counter *counter, u64 events)
+{
+ struct hw_perf_counter *hwc = &counter->hw;
+ u64 period, sample_period;
+ s64 delta;
+
+ events *= hwc->sample_period;
+ period = div64_u64(events, counter->attr.sample_freq);
+
+ delta = (s64)(period - hwc->sample_period);
+ delta = (delta + 7) / 8; /* low pass filter */
+
+ sample_period = hwc->sample_period + delta;
+
+ if (!sample_period)
+ sample_period = 1;
+
+ perf_log_period(counter, sample_period);
+
+ hwc->sample_period = sample_period;
+}
+
+static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
+{
+ struct perf_counter *counter;
+ struct hw_perf_counter *hwc;
+ u64 interrupts, freq;
+
+ spin_lock(&ctx->lock);
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ continue;
+
+ hwc = &counter->hw;
+
+ interrupts = hwc->interrupts;
+ hwc->interrupts = 0;
+
+ /*
+ * unthrottle counters on the tick
+ */
+ if (interrupts == MAX_INTERRUPTS) {
+ perf_log_throttle(counter, 1);
+ counter->pmu->unthrottle(counter);
+ interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
+ }
+
+ if (!counter->attr.freq || !counter->attr.sample_freq)
+ continue;
+
+ /*
+ * if the specified freq < HZ then we need to skip ticks
+ */
+ if (counter->attr.sample_freq < HZ) {
+ freq = counter->attr.sample_freq;
+
+ hwc->freq_count += freq;
+ hwc->freq_interrupts += interrupts;
+
+ if (hwc->freq_count < HZ)
+ continue;
+
+ interrupts = hwc->freq_interrupts;
+ hwc->freq_interrupts = 0;
+ hwc->freq_count -= HZ;
+ } else
+ freq = HZ;
+
+ perf_adjust_period(counter, freq * interrupts);
+
+ /*
+ * In order to avoid being stalled by an (accidental) huge
+ * sample period, force reset the sample period if we didn't
+ * get any events in this freq period.
+ */
+ if (!interrupts) {
+ perf_disable();
+ counter->pmu->disable(counter);
+ atomic_set(&hwc->period_left, 0);
+ counter->pmu->enable(counter);
+ perf_enable();
+ }
+ }
+ spin_unlock(&ctx->lock);
+}
+
+/*
+ * Round-robin a context's counters:
+ */
+static void rotate_ctx(struct perf_counter_context *ctx)
+{
+ struct perf_counter *counter;
+
+ if (!ctx->nr_counters)
+ return;
+
+ spin_lock(&ctx->lock);
+ /*
+ * Rotate the first entry last (works just fine for group counters too):
+ */
+ perf_disable();
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ list_move_tail(&counter->list_entry, &ctx->counter_list);
+ break;
+ }
+ perf_enable();
+
+ spin_unlock(&ctx->lock);
+}
+
+void perf_counter_task_tick(struct task_struct *curr, int cpu)
+{
+ struct perf_cpu_context *cpuctx;
+ struct perf_counter_context *ctx;
+
+ if (!atomic_read(&nr_counters))
+ return;
+
+ cpuctx = &per_cpu(perf_cpu_context, cpu);
+ ctx = curr->perf_counter_ctxp;
+
+ perf_ctx_adjust_freq(&cpuctx->ctx);
+ if (ctx)
+ perf_ctx_adjust_freq(ctx);
+
+ perf_counter_cpu_sched_out(cpuctx);
+ if (ctx)
+ __perf_counter_task_sched_out(ctx);
+
+ rotate_ctx(&cpuctx->ctx);
+ if (ctx)
+ rotate_ctx(ctx);
+
+ perf_counter_cpu_sched_in(cpuctx, cpu);
+ if (ctx)
+ perf_counter_task_sched_in(curr, cpu);
+}
+
+/*
+ * Cross CPU call to read the hardware counter
+ */
+static void __read(void *info)
+{
+ struct perf_counter *counter = info;
+ struct perf_counter_context *ctx = counter->ctx;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (ctx->is_active)
+ update_context_time(ctx);
+ counter->pmu->read(counter);
+ update_counter_times(counter);
+ local_irq_restore(flags);
+}
+
+static u64 perf_counter_read(struct perf_counter *counter)
+{
+ /*
+ * If counter is enabled and currently active on a CPU, update the
+ * value in the counter structure:
+ */
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
+ smp_call_function_single(counter->oncpu,
+ __read, counter, 1);
+ } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
+ update_counter_times(counter);
+ }
+
+ return atomic64_read(&counter->count);
+}
+
+/*
+ * Initialize the perf_counter context in a task_struct:
+ */
+static void
+__perf_counter_init_context(struct perf_counter_context *ctx,
+ struct task_struct *task)
+{
+ memset(ctx, 0, sizeof(*ctx));
+ spin_lock_init(&ctx->lock);
+ mutex_init(&ctx->mutex);
+ INIT_LIST_HEAD(&ctx->counter_list);
+ INIT_LIST_HEAD(&ctx->event_list);
+ atomic_set(&ctx->refcount, 1);
+ ctx->task = task;
+}
+
+static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
+{
+ struct perf_counter_context *parent_ctx;
+ struct perf_counter_context *ctx;
+ struct perf_cpu_context *cpuctx;
+ struct task_struct *task;
+ unsigned long flags;
+ int err;
+
+ /*
+ * If cpu is not a wildcard then this is a percpu counter:
+ */
+ if (cpu != -1) {
+ /* Must be root to operate on a CPU counter: */
+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EACCES);
+
+ if (cpu < 0 || cpu > num_possible_cpus())
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * We could be clever and allow attaching a counter to an
+ * offline CPU and activate it when the CPU comes up, but
+ * that's for later.
+ */
+ if (!cpu_isset(cpu, cpu_online_map))
+ return ERR_PTR(-ENODEV);
+
+ cpuctx = &per_cpu(perf_cpu_context, cpu);
+ ctx = &cpuctx->ctx;
+ get_ctx(ctx);
+
+ return ctx;
+ }
+
+ rcu_read_lock();
+ if (!pid)
+ task = current;
+ else
+ task = find_task_by_vpid(pid);
+ if (task)
+ get_task_struct(task);
+ rcu_read_unlock();
+
+ if (!task)
+ return ERR_PTR(-ESRCH);
+
+ /*
+ * Can't attach counters to a dying task.
+ */
+ err = -ESRCH;
+ if (task->flags & PF_EXITING)
+ goto errout;
+
+ /* Reuse ptrace permission checks for now. */
+ err = -EACCES;
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ goto errout;
+
+ retry:
+ ctx = perf_lock_task_context(task, &flags);
+ if (ctx) {
+ parent_ctx = ctx->parent_ctx;
+ if (parent_ctx) {
+ put_ctx(parent_ctx);
+ ctx->parent_ctx = NULL; /* no longer a clone */
+ }
+ /*
+ * Get an extra reference before dropping the lock so that
+ * this context won't get freed if the task exits.
+ */
+ get_ctx(ctx);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ }
+
+ if (!ctx) {
+ ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
+ err = -ENOMEM;
+ if (!ctx)
+ goto errout;
+ __perf_counter_init_context(ctx, task);
+ get_ctx(ctx);
+ if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
+ /*
+ * We raced with some other task; use
+ * the context they set.
+ */
+ kfree(ctx);
+ goto retry;
+ }
+ get_task_struct(task);
+ }
+
+ put_task_struct(task);
+ return ctx;
+
+ errout:
+ put_task_struct(task);
+ return ERR_PTR(err);
+}
+
+static void free_counter_rcu(struct rcu_head *head)
+{
+ struct perf_counter *counter;
+
+ counter = container_of(head, struct perf_counter, rcu_head);
+ if (counter->ns)
+ put_pid_ns(counter->ns);
+ kfree(counter);
+}
+
+static void perf_pending_sync(struct perf_counter *counter);
+
+static void free_counter(struct perf_counter *counter)
+{
+ perf_pending_sync(counter);
+
+ atomic_dec(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_dec(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_dec(&nr_comm_counters);
+
+ if (counter->destroy)
+ counter->destroy(counter);
+
+ put_ctx(counter->ctx);
+ call_rcu(&counter->rcu_head, free_counter_rcu);
+}
+
+/*
+ * Called when the last reference to the file is gone.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+ struct perf_counter *counter = file->private_data;
+ struct perf_counter_context *ctx = counter->ctx;
+
+ file->private_data = NULL;
+
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ perf_counter_remove_from_context(counter);
+ mutex_unlock(&ctx->mutex);
+
+ mutex_lock(&counter->owner->perf_counter_mutex);
+ list_del_init(&counter->owner_entry);
+ mutex_unlock(&counter->owner->perf_counter_mutex);
+ put_task_struct(counter->owner);
+
+ free_counter(counter);
+
+ return 0;
+}
+
+/*
+ * Read the performance counter - simple non blocking version for now
+ */
+static ssize_t
+perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
+{
+ u64 values[3];
+ int n;
+
+ /*
+ * Return end-of-file for a read on a counter that is in
+ * error state (i.e. because it was pinned but it couldn't be
+ * scheduled on to the CPU at some point).
+ */
+ if (counter->state == PERF_COUNTER_STATE_ERROR)
+ return 0;
+
+ WARN_ON_ONCE(counter->ctx->parent_ctx);
+ mutex_lock(&counter->child_mutex);
+ values[0] = perf_counter_read(counter);
+ n = 1;
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ values[n++] = counter->total_time_enabled +
+ atomic64_read(&counter->child_total_time_enabled);
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ values[n++] = counter->total_time_running +
+ atomic64_read(&counter->child_total_time_running);
+ if (counter->attr.read_format & PERF_FORMAT_ID)
+ values[n++] = counter->id;
+ mutex_unlock(&counter->child_mutex);
+
+ if (count < n * sizeof(u64))
+ return -EINVAL;
+ count = n * sizeof(u64);
+
+ if (copy_to_user(buf, values, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static ssize_t
+perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct perf_counter *counter = file->private_data;
+
+ return perf_read_hw(counter, buf, count);
+}
+
+static unsigned int perf_poll(struct file *file, poll_table *wait)
+{
+ struct perf_counter *counter = file->private_data;
+ struct perf_mmap_data *data;
+ unsigned int events = POLL_HUP;
+
+ rcu_read_lock();
+ data = rcu_dereference(counter->data);
+ if (data)
+ events = atomic_xchg(&data->poll, 0);
+ rcu_read_unlock();
+
+ poll_wait(file, &counter->waitq, wait);
+
+ return events;
+}
+
+static void perf_counter_reset(struct perf_counter *counter)
+{
+ (void)perf_counter_read(counter);
+ atomic64_set(&counter->count, 0);
+ perf_counter_update_userpage(counter);
+}
+
+static void perf_counter_for_each_sibling(struct perf_counter *counter,
+ void (*func)(struct perf_counter *))
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ struct perf_counter *sibling;
+
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ counter = counter->group_leader;
+
+ func(counter);
+ list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+ func(sibling);
+ mutex_unlock(&ctx->mutex);
+}
+
+/*
+ * Holding the top-level counter's child_mutex means that any
+ * descendant process that has inherited this counter will block
+ * in sync_child_counter if it goes to exit, thus satisfying the
+ * task existence requirements of perf_counter_enable/disable.
+ */
+static void perf_counter_for_each_child(struct perf_counter *counter,
+ void (*func)(struct perf_counter *))
+{
+ struct perf_counter *child;
+
+ WARN_ON_ONCE(counter->ctx->parent_ctx);
+ mutex_lock(&counter->child_mutex);
+ func(counter);
+ list_for_each_entry(child, &counter->child_list, child_list)
+ func(child);
+ mutex_unlock(&counter->child_mutex);
+}
+
+static void perf_counter_for_each(struct perf_counter *counter,
+ void (*func)(struct perf_counter *))
+{
+ struct perf_counter *child;
+
+ WARN_ON_ONCE(counter->ctx->parent_ctx);
+ mutex_lock(&counter->child_mutex);
+ perf_counter_for_each_sibling(counter, func);
+ list_for_each_entry(child, &counter->child_list, child_list)
+ perf_counter_for_each_sibling(child, func);
+ mutex_unlock(&counter->child_mutex);
+}
+
+static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ unsigned long size;
+ int ret = 0;
+ u64 value;
+
+ if (!counter->attr.sample_period)
+ return -EINVAL;
+
+ size = copy_from_user(&value, arg, sizeof(value));
+ if (size != sizeof(value))
+ return -EFAULT;
+
+ if (!value)
+ return -EINVAL;
+
+ spin_lock_irq(&ctx->lock);
+ if (counter->attr.freq) {
+ if (value > sysctl_perf_counter_sample_rate) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ counter->attr.sample_freq = value;
+ } else {
+ perf_log_period(counter, value);
+
+ counter->attr.sample_period = value;
+ counter->hw.sample_period = value;
+ }
+unlock:
+ spin_unlock_irq(&ctx->lock);
+
+ return ret;
+}
+
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct perf_counter *counter = file->private_data;
+ void (*func)(struct perf_counter *);
+ u32 flags = arg;
+
+ switch (cmd) {
+ case PERF_COUNTER_IOC_ENABLE:
+ func = perf_counter_enable;
+ break;
+ case PERF_COUNTER_IOC_DISABLE:
+ func = perf_counter_disable;
+ break;
+ case PERF_COUNTER_IOC_RESET:
+ func = perf_counter_reset;
+ break;
+
+ case PERF_COUNTER_IOC_REFRESH:
+ return perf_counter_refresh(counter, arg);
+
+ case PERF_COUNTER_IOC_PERIOD:
+ return perf_counter_period(counter, (u64 __user *)arg);
+
+ default:
+ return -ENOTTY;
+ }
+
+ if (flags & PERF_IOC_FLAG_GROUP)
+ perf_counter_for_each(counter, func);
+ else
+ perf_counter_for_each_child(counter, func);
+
+ return 0;
+}
+
+int perf_counter_task_enable(void)
+{
+ struct perf_counter *counter;
+
+ mutex_lock(&current->perf_counter_mutex);
+ list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+ perf_counter_for_each_child(counter, perf_counter_enable);
+ mutex_unlock(&current->perf_counter_mutex);
+
+ return 0;
+}
+
+int perf_counter_task_disable(void)
+{
+ struct perf_counter *counter;
+
+ mutex_lock(&current->perf_counter_mutex);
+ list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+ perf_counter_for_each_child(counter, perf_counter_disable);
+ mutex_unlock(&current->perf_counter_mutex);
+
+ return 0;
+}
+
+/*
+ * Callers need to ensure there can be no nesting of this function, otherwise
+ * the seqlock logic goes bad. We can not serialize this because the arch
+ * code calls this from NMI context.
+ */
+void perf_counter_update_userpage(struct perf_counter *counter)
+{
+ struct perf_counter_mmap_page *userpg;
+ struct perf_mmap_data *data;
+
+ rcu_read_lock();
+ data = rcu_dereference(counter->data);
+ if (!data)
+ goto unlock;
+
+ userpg = data->user_page;
+
+ /*
+ * Disable preemption so as to not let the corresponding user-space
+ * spin too long if we get preempted.
+ */
+ preempt_disable();
+ ++userpg->lock;
+ barrier();
+ userpg->index = counter->hw.idx;
+ userpg->offset = atomic64_read(&counter->count);
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+ userpg->offset -= atomic64_read(&counter->hw.prev_count);
+
+ barrier();
+ ++userpg->lock;
+ preempt_enable();
+unlock:
+ rcu_read_unlock();
+}
+
+static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct perf_counter *counter = vma->vm_file->private_data;
+ struct perf_mmap_data *data;
+ int ret = VM_FAULT_SIGBUS;
+
+ rcu_read_lock();
+ data = rcu_dereference(counter->data);
+ if (!data)
+ goto unlock;
+
+ if (vmf->pgoff == 0) {
+ vmf->page = virt_to_page(data->user_page);
+ } else {
+ int nr = vmf->pgoff - 1;
+
+ if ((unsigned)nr > data->nr_pages)
+ goto unlock;
+
+ vmf->page = virt_to_page(data->data_pages[nr]);
+ }
+ get_page(vmf->page);
+ ret = 0;
+unlock:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
+{
+ struct perf_mmap_data *data;
+ unsigned long size;
+ int i;
+
+ WARN_ON(atomic_read(&counter->mmap_count));
+
+ size = sizeof(struct perf_mmap_data);
+ size += nr_pages * sizeof(void *);
+
+ data = kzalloc(size, GFP_KERNEL);
+ if (!data)
+ goto fail;
+
+ data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!data->user_page)
+ goto fail_user_page;
+
+ for (i = 0; i < nr_pages; i++) {
+ data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!data->data_pages[i])
+ goto fail_data_pages;
+ }
+
+ data->nr_pages = nr_pages;
+ atomic_set(&data->lock, -1);
+
+ rcu_assign_pointer(counter->data, data);
+
+ return 0;
+
+fail_data_pages:
+ for (i--; i >= 0; i--)
+ free_page((unsigned long)data->data_pages[i]);
+
+ free_page((unsigned long)data->user_page);
+
+fail_user_page:
+ kfree(data);
+
+fail:
+ return -ENOMEM;
+}
+
+static void __perf_mmap_data_free(struct rcu_head *rcu_head)
+{
+ struct perf_mmap_data *data;
+ int i;
+
+ data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
+
+ free_page((unsigned long)data->user_page);
+ for (i = 0; i < data->nr_pages; i++)
+ free_page((unsigned long)data->data_pages[i]);
+ kfree(data);
+}
+
+static void perf_mmap_data_free(struct perf_counter *counter)
+{
+ struct perf_mmap_data *data = counter->data;
+
+ WARN_ON(atomic_read(&counter->mmap_count));
+
+ rcu_assign_pointer(counter->data, NULL);
+ call_rcu(&data->rcu_head, __perf_mmap_data_free);
+}
+
+static void perf_mmap_open(struct vm_area_struct *vma)
+{
+ struct perf_counter *counter = vma->vm_file->private_data;
+
+ atomic_inc(&counter->mmap_count);
+}
+
+static void perf_mmap_close(struct vm_area_struct *vma)
+{
+ struct perf_counter *counter = vma->vm_file->private_data;
+
+ WARN_ON_ONCE(counter->ctx->parent_ctx);
+ if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
+ struct user_struct *user = current_user();
+
+ atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
+ vma->vm_mm->locked_vm -= counter->data->nr_locked;
+ perf_mmap_data_free(counter);
+ mutex_unlock(&counter->mmap_mutex);
+ }
+}
+
+static struct vm_operations_struct perf_mmap_vmops = {
+ .open = perf_mmap_open,
+ .close = perf_mmap_close,
+ .fault = perf_mmap_fault,
+};
+
+static int perf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct perf_counter *counter = file->private_data;
+ unsigned long user_locked, user_lock_limit;
+ struct user_struct *user = current_user();
+ unsigned long locked, lock_limit;
+ unsigned long vma_size;
+ unsigned long nr_pages;
+ long user_extra, extra;
+ int ret = 0;
+
+ if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
+ return -EINVAL;
+
+ vma_size = vma->vm_end - vma->vm_start;
+ nr_pages = (vma_size / PAGE_SIZE) - 1;
+
+ /*
+ * If we have data pages ensure they're a power-of-two number, so we
+ * can do bitmasks instead of modulo.
+ */
+ if (nr_pages != 0 && !is_power_of_2(nr_pages))
+ return -EINVAL;
+
+ if (vma_size != PAGE_SIZE * (1 + nr_pages))
+ return -EINVAL;
+
+ if (vma->vm_pgoff != 0)
+ return -EINVAL;
+
+ WARN_ON_ONCE(counter->ctx->parent_ctx);
+ mutex_lock(&counter->mmap_mutex);
+ if (atomic_inc_not_zero(&counter->mmap_count)) {
+ if (nr_pages != counter->data->nr_pages)
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ user_extra = nr_pages + 1;
+ user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+
+ /*
+ * Increase the limit linearly with more CPUs:
+ */
+ user_lock_limit *= num_online_cpus();
+
+ user_locked = atomic_long_read(&user->locked_vm) + user_extra;
+
+ extra = 0;
+ if (user_locked > user_lock_limit)
+ extra = user_locked - user_lock_limit;
+
+ lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit >>= PAGE_SHIFT;
+ locked = vma->vm_mm->locked_vm + extra;
+
+ if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
+ WARN_ON(counter->data);
+ ret = perf_mmap_data_alloc(counter, nr_pages);
+ if (ret)
+ goto unlock;
+
+ atomic_set(&counter->mmap_count, 1);
+ atomic_long_add(user_extra, &user->locked_vm);
+ vma->vm_mm->locked_vm += extra;
+ counter->data->nr_locked = extra;
+unlock:
+ mutex_unlock(&counter->mmap_mutex);
+
+ vma->vm_flags &= ~VM_MAYWRITE;
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_ops = &perf_mmap_vmops;
+
+ return ret;
+}
+
+static int perf_fasync(int fd, struct file *filp, int on)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ struct perf_counter *counter = filp->private_data;
+ int retval;
+
+ mutex_lock(&inode->i_mutex);
+ retval = fasync_helper(fd, filp, on, &counter->fasync);
+ mutex_unlock(&inode->i_mutex);
+
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+static const struct file_operations perf_fops = {
+ .release = perf_release,
+ .read = perf_read,
+ .poll = perf_poll,
+ .unlocked_ioctl = perf_ioctl,
+ .compat_ioctl = perf_ioctl,
+ .mmap = perf_mmap,
+ .fasync = perf_fasync,
+};
+
+/*
+ * Perf counter wakeup
+ *
+ * If there's data, ensure we set the poll() state and publish everything
+ * to user-space before waking everybody up.
+ */
+
+void perf_counter_wakeup(struct perf_counter *counter)
+{
+ wake_up_all(&counter->waitq);
+
+ if (counter->pending_kill) {
+ kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
+ counter->pending_kill = 0;
+ }
+}
+
+/*
+ * Pending wakeups
+ *
+ * Handle the case where we need to wake up from NMI (or rq->lock) context.
+ *
+ * The NMI bit means we cannot possibly take locks. Therefore, maintain a
+ * single linked list and use cmpxchg() to add entries locklessly.
+ */
+
+static void perf_pending_counter(struct perf_pending_entry *entry)
+{
+ struct perf_counter *counter = container_of(entry,
+ struct perf_counter, pending);
+
+ if (counter->pending_disable) {
+ counter->pending_disable = 0;
+ perf_counter_disable(counter);
+ }
+
+ if (counter->pending_wakeup) {
+ counter->pending_wakeup = 0;
+ perf_counter_wakeup(counter);
+ }
+}
+
+#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
+
+static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
+ PENDING_TAIL,
+};
+
+static void perf_pending_queue(struct perf_pending_entry *entry,
+ void (*func)(struct perf_pending_entry *))
+{
+ struct perf_pending_entry **head;
+
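+ /* Bail if this entry is already queued; ->next is NULL only while off-list. */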
+ if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
+ return;
+
+ entry->func = func;
+
+ head = &get_cpu_var(perf_pending_head);
+
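+ /* Lockless push onto the per-cpu single-linked list. */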
+ do {
+ entry->next = *head;
+ } while (cmpxchg(head, entry->next, entry) != entry->next);
+
+ set_perf_counter_pending();
+
+ put_cpu_var(perf_pending_head);
+}
+
+static int __perf_pending_run(void)
+{
+ struct perf_pending_entry *list;
+ int nr = 0;
+
+ list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
+ while (list != PENDING_TAIL) {
+ void (*func)(struct perf_pending_entry *);
+ struct perf_pending_entry *entry = list;
+
+ list = list->next;
+
+ func = entry->func;
+ entry->next = NULL;
+ /*
+ * Ensure we observe the unqueue before we issue the wakeup,
+ * so that we won't be waiting forever.
+ * -- see perf_not_pending().
+ */
+ smp_wmb();
+
+ func(entry);
+ nr++;
+ }
+
+ return nr;
+}
+
+static inline int perf_not_pending(struct perf_counter *counter)
+{
+ /*
+ * If we flush on whatever cpu we run, there is a chance we don't
+ * need to wait.
+ */
+ get_cpu();
+ __perf_pending_run();
+ put_cpu();
+
+ /*
+ * Ensure we see the proper queue state before going to sleep
+ * so that we do not miss the wakeup. -- see __perf_pending_run()
+ */
+ smp_rmb();
+ return counter->pending.next == NULL;
+}
+
+static void perf_pending_sync(struct perf_counter *counter)
+{
+ wait_event(counter->waitq, perf_not_pending(counter));
+}
+
+void perf_counter_do_pending(void)
+{
+ __perf_pending_run();
+}
+
+/*
+ * Callchain support -- arch specific
+ */
+
+__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+ return NULL;
+}
+
+/*
+ * Output
+ */
+
+struct perf_output_handle {
+ struct perf_counter *counter;
+ struct perf_mmap_data *data;
+ unsigned long head;
+ unsigned long offset;
+ int nmi;
+ int overflow;
+ int locked;
+ unsigned long flags;
+};
+
+static void perf_output_wakeup(struct perf_output_handle *handle)
+{
+ atomic_set(&handle->data->poll, POLL_IN);
+
+ if (handle->nmi) {
+ handle->counter->pending_wakeup = 1;
+ perf_pending_queue(&handle->counter->pending,
+ perf_pending_counter);
+ } else
+ perf_counter_wakeup(handle->counter);
+}
+
+/*
+ * Curious locking construct.
+ *
+ * We need to ensure a later event doesn't publish a head when a former
+ * event isn't done writing. However, since we need to deal with NMIs we
+ * cannot fully serialize things.
+ *
+ * What we do is serialize between CPUs so we only have to deal with NMI
+ * nesting on a single CPU.
+ *
+ * We only publish the head (and generate a wakeup) when the outer-most
+ * event completes.
+ */
+static void perf_output_lock(struct perf_output_handle *handle)
+{
+ struct perf_mmap_data *data = handle->data;
+ int cpu;
+
+ handle->locked = 0;
+
+ local_irq_save(handle->flags);
+ cpu = smp_processor_id();
+
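+ /* Nested from NMI context while this CPU already holds the lock: don't re-take it. */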
+ if (in_nmi() && atomic_read(&data->lock) == cpu)
+ return;
+
+ while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
+ cpu_relax();
+
+ handle->locked = 1;
+}
+
+static void perf_output_unlock(struct perf_output_handle *handle)
+{
+ struct perf_mmap_data *data = handle->data;
+ unsigned long head;
+ int cpu;
+
+ data->done_head = data->head;
+
+ if (!handle->locked)
+ goto out;
+
+again:
+ /*
+ * The xchg implies a full barrier that ensures all writes are done
+ * before we publish the new head, matched by a rmb() in userspace when
+ * reading this position.
+ */
+ while ((head = atomic_long_xchg(&data->done_head, 0)))
+ data->user_page->data_head = head;
+
+ /*
+ * NMI can happen here, which means we can miss a done_head update.
+ */
+
+ cpu = atomic_xchg(&data->lock, -1);
+ WARN_ON_ONCE(cpu != smp_processor_id());
+
+ /*
+ * Therefore we have to validate that we did not miss a done_head update.
+ */
+ if (unlikely(atomic_long_read(&data->done_head))) {
+ /*
+ * Since we had it locked, we can lock it again.
+ */
+ while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
+ cpu_relax();
+
+ goto again;
+ }
+
+ if (atomic_xchg(&data->wakeup, 0))
+ perf_output_wakeup(handle);
+out:
+ local_irq_restore(handle->flags);
+}
+
+static int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_counter *counter, unsigned int size,
+ int nmi, int overflow)
+{
+ struct perf_mmap_data *data;
+ unsigned int offset, head;
+
+ /*
+ * For inherited counters we send all the output towards the parent.
+ */
+ if (counter->parent)
+ counter = counter->parent;
+
+ rcu_read_lock();
+ data = rcu_dereference(counter->data);
+ if (!data)
+ goto out;
+
+ handle->data = data;
+ handle->counter = counter;
+ handle->nmi = nmi;
+ handle->overflow = overflow;
+
+ if (!data->nr_pages)
+ goto fail;
+
+ perf_output_lock(handle);
+
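+ /* Atomically reserve 'size' bytes by advancing the buffer head. */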
+ do {
+ offset = head = atomic_long_read(&data->head);
+ head += size;
+ } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
+
+ handle->offset = offset;
+ handle->head = head;
+
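+ /* If this write crosses a page boundary, arrange for a wakeup. */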
+ if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
+ atomic_set(&data->wakeup, 1);
+
+ return 0;
+
+fail:
+ perf_output_wakeup(handle);
+out:
+ rcu_read_unlock();
+
+ return -ENOSPC;
+}
+
+static void perf_output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len)
+{
+ unsigned int pages_mask;
+ unsigned int offset;
+ unsigned int size;
+ void **pages;
+
+ offset = handle->offset;
+ pages_mask = handle->data->nr_pages - 1;
+ pages = handle->data->data_pages;
+
+ do {
+ unsigned int page_offset;
+ int nr;
+
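+ /* nr_pages is a power of two, so masking implements the modulo. */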
+ nr = (offset >> PAGE_SHIFT) & pages_mask;
+ page_offset = offset & (PAGE_SIZE - 1);
+ size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
+
+ memcpy(pages[nr] + page_offset, buf, size);
+
+ len -= size;
+ buf += size;
+ offset += size;
+ } while (len);
+
+ handle->offset = offset;
+
+ /*
+ * Check we didn't copy past our reservation window, taking the
+ * possible unsigned int wrap into account.
+ */
+ WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
+}
+
+#define perf_output_put(handle, x) \
+ perf_output_copy((handle), &(x), sizeof(x))
+
+static void perf_output_end(struct perf_output_handle *handle)
+{
+ struct perf_counter *counter = handle->counter;
+ struct perf_mmap_data *data = handle->data;
+
+ int wakeup_events = counter->attr.wakeup_events;
+
+ if (handle->overflow && wakeup_events) {
+ int events = atomic_inc_return(&data->events);
+ if (events >= wakeup_events) {
+ atomic_sub(wakeup_events, &data->events);
+ atomic_set(&data->wakeup, 1);
+ }
+ }
+
+ perf_output_unlock(handle);
+ rcu_read_unlock();
+}
+
+static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
+{
+ /*
+ * only top level counters have the pid namespace they were created in
+ */
+ if (counter->parent)
+ counter = counter->parent;
+
+ return task_tgid_nr_ns(p, counter->ns);
+}
+
+static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
+{
+ /*
+ * only top level counters have the pid namespace they were created in
+ */
+ if (counter->parent)
+ counter = counter->parent;
+
+ return task_pid_nr_ns(p, counter->ns);
+}
+
+static void perf_counter_output(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data)
+{
+ int ret;
+ u64 sample_type = counter->attr.sample_type;
+ struct perf_output_handle handle;
+ struct perf_event_header header;
+ u64 ip;
+ struct {
+ u32 pid, tid;
+ } tid_entry;
+ struct {
+ u64 id;
+ u64 counter;
+ } group_entry;
+ struct perf_callchain_entry *callchain = NULL;
+ int callchain_size = 0;
+ u64 time;
+ struct {
+ u32 cpu, reserved;
+ } cpu_entry;
+
+ header.type = 0;
+ header.size = sizeof(header);
+
+ header.misc = PERF_EVENT_MISC_OVERFLOW;
+ header.misc |= perf_misc_flags(data->regs);
+
+ if (sample_type & PERF_SAMPLE_IP) {
+ ip = perf_instruction_pointer(data->regs);
+ header.type |= PERF_SAMPLE_IP;
+ header.size += sizeof(ip);
+ }
+
+ if (sample_type & PERF_SAMPLE_TID) {
+ /* namespace issues */
+ tid_entry.pid = perf_counter_pid(counter, current);
+ tid_entry.tid = perf_counter_tid(counter, current);
+
+ header.type |= PERF_SAMPLE_TID;
+ header.size += sizeof(tid_entry);
+ }
+
+ if (sample_type & PERF_SAMPLE_TIME) {
+ /*
+ * Maybe do better on x86 and provide cpu_clock_nmi()
+ */
+ time = sched_clock();
+
+ header.type |= PERF_SAMPLE_TIME;
+ header.size += sizeof(u64);
+ }
+
+ if (sample_type & PERF_SAMPLE_ADDR) {
+ header.type |= PERF_SAMPLE_ADDR;
+ header.size += sizeof(u64);
+ }
+
+ if (sample_type & PERF_SAMPLE_ID) {
+ header.type |= PERF_SAMPLE_ID;
+ header.size += sizeof(u64);
+ }
+
+ if (sample_type & PERF_SAMPLE_CPU) {
+ header.type |= PERF_SAMPLE_CPU;
+ header.size += sizeof(cpu_entry);
+
+ cpu_entry.cpu = raw_smp_processor_id();
+ }
+
+ if (sample_type & PERF_SAMPLE_PERIOD) {
+ header.type |= PERF_SAMPLE_PERIOD;
+ header.size += sizeof(u64);
+ }
+
+ if (sample_type & PERF_SAMPLE_GROUP) {
+ header.type |= PERF_SAMPLE_GROUP;
+ header.size += sizeof(u64) +
+ counter->nr_siblings * sizeof(group_entry);
+ }
+
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ callchain = perf_callchain(data->regs);
+
+ if (callchain) {
+ callchain_size = (1 + callchain->nr) * sizeof(u64);
+
+ header.type |= PERF_SAMPLE_CALLCHAIN;
+ header.size += callchain_size;
+ }
+ }
+
+ ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
+ if (ret)
+ return;
+
+ perf_output_put(&handle, header);
+
+ if (sample_type & PERF_SAMPLE_IP)
+ perf_output_put(&handle, ip);
+
+ if (sample_type & PERF_SAMPLE_TID)
+ perf_output_put(&handle, tid_entry);
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ perf_output_put(&handle, time);
+
+ if (sample_type & PERF_SAMPLE_ADDR)
+ perf_output_put(&handle, data->addr);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ perf_output_put(&handle, counter->id);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ perf_output_put(&handle, cpu_entry);
+
+ if (sample_type & PERF_SAMPLE_PERIOD)
+ perf_output_put(&handle, data->period);
+
+ /*
+ * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
+ */
+ if (sample_type & PERF_SAMPLE_GROUP) {
+ struct perf_counter *leader, *sub;
+ u64 nr = counter->nr_siblings;
+
+ perf_output_put(&handle, nr);
+
+ leader = counter->group_leader;
+ list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+ if (sub != counter)
+ sub->pmu->read(sub);
+
+ group_entry.id = sub->id;
+ group_entry.counter = atomic64_read(&sub->count);
+
+ perf_output_put(&handle, group_entry);
+ }
+ }
+
+ if (callchain)
+ perf_output_copy(&handle, callchain, callchain_size);
+
+ perf_output_end(&handle);
+}
+
+/*
+ * fork tracking
+ */
+
+struct perf_fork_event {
+ struct task_struct *task;
+
+ struct {
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 ppid;
+ } event;
+};
+
+static void perf_counter_fork_output(struct perf_counter *counter,
+ struct perf_fork_event *fork_event)
+{
+ struct perf_output_handle handle;
+ int size = fork_event->event.header.size;
+ struct task_struct *task = fork_event->task;
+ int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+ if (ret)
+ return;
+
+ fork_event->event.pid = perf_counter_pid(counter, task);
+ fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+
+ perf_output_put(&handle, fork_event->event);
+ perf_output_end(&handle);
+}
+
+static int perf_counter_fork_match(struct perf_counter *counter)
+{
+ if (counter->attr.comm || counter->attr.mmap)
+ return 1;
+
+ return 0;
+}
+
+static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
+ struct perf_fork_event *fork_event)
+{
+ struct perf_counter *counter;
+
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+ if (perf_counter_fork_match(counter))
+ perf_counter_fork_output(counter, fork_event);
+ }
+ rcu_read_unlock();
+}
+
+static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+{
+ struct perf_cpu_context *cpuctx;
+ struct perf_counter_context *ctx;
+
+ cpuctx = &get_cpu_var(perf_cpu_context);
+ perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+ put_cpu_var(perf_cpu_context);
+
+ rcu_read_lock();
+ /*
+ * doesn't really matter which of the child contexts the
+ * event ends up in.
+ */
+ ctx = rcu_dereference(current->perf_counter_ctxp);
+ if (ctx)
+ perf_counter_fork_ctx(ctx, fork_event);
+ rcu_read_unlock();
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+ struct perf_fork_event fork_event;
+
+ if (!atomic_read(&nr_comm_counters) &&
+ !atomic_read(&nr_mmap_counters))
+ return;
+
+ fork_event = (struct perf_fork_event){
+ .task = task,
+ .event = {
+ .header = {
+ .type = PERF_EVENT_FORK,
+ .size = sizeof(fork_event.event),
+ },
+ },
+ };
+
+ perf_counter_fork_event(&fork_event);
+}
+
+/*
+ * comm tracking
+ */
+
+struct perf_comm_event {
+ struct task_struct *task;
+ char *comm;
+ int comm_size;
+
+ struct {
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 tid;
+ } event;
+};
+
+static void perf_counter_comm_output(struct perf_counter *counter,
+ struct perf_comm_event *comm_event)
+{
+ struct perf_output_handle handle;
+ int size = comm_event->event.header.size;
+ int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+ if (ret)
+ return;
+
+ comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
+ comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
+
+ perf_output_put(&handle, comm_event->event);
+ perf_output_copy(&handle, comm_event->comm,
+ comm_event->comm_size);
+ perf_output_end(&handle);
+}
+
+static int perf_counter_comm_match(struct perf_counter *counter)
+{
+ if (counter->attr.comm)
+ return 1;
+
+ return 0;
+}
+
+static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
+ struct perf_comm_event *comm_event)
+{
+ struct perf_counter *counter;
+
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+ if (perf_counter_comm_match(counter))
+ perf_counter_comm_output(counter, comm_event);
+ }
+ rcu_read_unlock();
+}
+
+static void perf_counter_comm_event(struct perf_comm_event *comm_event)
+{
+ struct perf_cpu_context *cpuctx;
+ struct perf_counter_context *ctx;
+ unsigned int size;
+ char *comm = comm_event->task->comm;
+
+ size = ALIGN(strlen(comm)+1, sizeof(u64));
+
+ comm_event->comm = comm;
+ comm_event->comm_size = size;
+
+ comm_event->event.header.size = sizeof(comm_event->event) + size;
+
+ cpuctx = &get_cpu_var(perf_cpu_context);
+ perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
+ put_cpu_var(perf_cpu_context);
+
+ rcu_read_lock();
+ /*
+ * doesn't really matter which of the child contexts the
+ * event ends up in.
+ */
+ ctx = rcu_dereference(current->perf_counter_ctxp);
+ if (ctx)
+ perf_counter_comm_ctx(ctx, comm_event);
+ rcu_read_unlock();
+}
+
+void perf_counter_comm(struct task_struct *task)
+{
+ struct perf_comm_event comm_event;
+
+ if (!atomic_read(&nr_comm_counters))
+ return;
+
+ comm_event = (struct perf_comm_event){
+ .task = task,
+ .event = {
+ .header = { .type = PERF_EVENT_COMM, },
+ },
+ };
+
+ perf_counter_comm_event(&comm_event);
+}
+
+/*
+ * mmap tracking
+ */
+
+struct perf_mmap_event {
+ struct vm_area_struct *vma;
+
+ const char *file_name;
+ int file_size;
+
+ struct {
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 tid;
+ u64 start;
+ u64 len;
+ u64 pgoff;
+ } event;
+};
+
+static void perf_counter_mmap_output(struct perf_counter *counter,
+ struct perf_mmap_event *mmap_event)
+{
+ struct perf_output_handle handle;
+ int size = mmap_event->event.header.size;
+ int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+ if (ret)
+ return;
+
+ mmap_event->event.pid = perf_counter_pid(counter, current);
+ mmap_event->event.tid = perf_counter_tid(counter, current);
+
+ perf_output_put(&handle, mmap_event->event);
+ perf_output_copy(&handle, mmap_event->file_name,
+ mmap_event->file_size);
+ perf_output_end(&handle);
+}
+
+static int perf_counter_mmap_match(struct perf_counter *counter,
+ struct perf_mmap_event *mmap_event)
+{
+ if (counter->attr.mmap)
+ return 1;
+
+ return 0;
+}
+
+static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
+ struct perf_mmap_event *mmap_event)
+{
+ struct perf_counter *counter;
+
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+ if (perf_counter_mmap_match(counter, mmap_event))
+ perf_counter_mmap_output(counter, mmap_event);
+ }
+ rcu_read_unlock();
+}
+
+static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
+{
+ struct perf_cpu_context *cpuctx;
+ struct perf_counter_context *ctx;
+ struct vm_area_struct *vma = mmap_event->vma;
+ struct file *file = vma->vm_file;
+ unsigned int size;
+ char tmp[16];
+ char *buf = NULL;
+ const char *name;
+
+ if (file) {
+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
+ if (!buf) {
+ name = strncpy(tmp, "//enomem", sizeof(tmp));
+ goto got_name;
+ }
+ name = d_path(&file->f_path, buf, PATH_MAX);
+ if (IS_ERR(name)) {
+ name = strncpy(tmp, "//toolong", sizeof(tmp));
+ goto got_name;
+ }
+ } else {
+ name = arch_vma_name(mmap_event->vma);
+ if (name)
+ goto got_name;
+
+ if (!vma->vm_mm) {
+ name = strncpy(tmp, "[vdso]", sizeof(tmp));
+ goto got_name;
+ }
+
+ name = strncpy(tmp, "//anon", sizeof(tmp));
+ goto got_name;
+ }
+
+got_name:
+ size = ALIGN(strlen(name)+1, sizeof(u64));
+
+ mmap_event->file_name = name;
+ mmap_event->file_size = size;
+
+ mmap_event->event.header.size = sizeof(mmap_event->event) + size;
+
+ cpuctx = &get_cpu_var(perf_cpu_context);
+ perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
+ put_cpu_var(perf_cpu_context);
+
+ rcu_read_lock();
+ /*
+ * doesn't really matter which of the child contexts the
+ * event ends up in.
+ */
+ ctx = rcu_dereference(current->perf_counter_ctxp);
+ if (ctx)
+ perf_counter_mmap_ctx(ctx, mmap_event);
+ rcu_read_unlock();
+
+ kfree(buf);
+}
+
+void __perf_counter_mmap(struct vm_area_struct *vma)
+{
+ struct perf_mmap_event mmap_event;
+
+ if (!atomic_read(&nr_mmap_counters))
+ return;
+
+ mmap_event = (struct perf_mmap_event){
+ .vma = vma,
+ .event = {
+ .header = { .type = PERF_EVENT_MMAP, },
+ .start = vma->vm_start,
+ .len = vma->vm_end - vma->vm_start,
+ .pgoff = vma->vm_pgoff,
+ },
+ };
+
+ perf_counter_mmap_event(&mmap_event);
+}
+
+/*
+ * Log sample_period changes so that analyzing tools can re-normalize the
+ * event flow.
+ */
+
+struct freq_event {
+ struct perf_event_header header;
+ u64 time;
+ u64 id;
+ u64 period;
+};
+
+static void perf_log_period(struct perf_counter *counter, u64 period)
+{
+ struct perf_output_handle handle;
+ struct freq_event event;
+ int ret;
+
+ if (counter->hw.sample_period == period)
+ return;
+
+ if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
+ return;
+
+ event = (struct freq_event) {
+ .header = {
+ .type = PERF_EVENT_PERIOD,
+ .misc = 0,
+ .size = sizeof(event),
+ },
+ .time = sched_clock(),
+ .id = counter->id,
+ .period = period,
+ };
+
+ ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
+ if (ret)
+ return;
+
+ perf_output_put(&handle, event);
+ perf_output_end(&handle);
+}
+
+/*
+ * IRQ throttle logging
+ */
+
+static void perf_log_throttle(struct perf_counter *counter, int enable)
+{
+ struct perf_output_handle handle;
+ int ret;
+
+ struct {
+ struct perf_event_header header;
+ u64 time;
+ u64 id;
+ } throttle_event = {
+ .header = {
+ .type = PERF_EVENT_THROTTLE + 1,
+ .misc = 0,
+ .size = sizeof(throttle_event),
+ },
+ .time = sched_clock(),
+ .id = counter->id,
+ };
+
+ ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
+ if (ret)
+ return;
+
+ perf_output_put(&handle, throttle_event);
+ perf_output_end(&handle);
+}
+
+/*
+ * Generic counter overflow handling.
+ */
+
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data)
+{
+ int events = atomic_read(&counter->event_limit);
+ int throttle = counter->pmu->unthrottle != NULL;
+ struct hw_perf_counter *hwc = &counter->hw;
+ int ret = 0;
+
+ if (!throttle) {
+ hwc->interrupts++;
+ } else {
+ if (hwc->interrupts != MAX_INTERRUPTS) {
+ hwc->interrupts++;
+ if (HZ * hwc->interrupts >
+ (u64)sysctl_perf_counter_sample_rate) {
+ hwc->interrupts = MAX_INTERRUPTS;
+ perf_log_throttle(counter, 0);
+ ret = 1;
+ }
+ } else {
+ /*
+ * Keep re-disabling the counter even though we disabled it on
+ * the previous pass - just in case we raced with a sched-in
+ * and the counter got enabled again:
+ */
+ ret = 1;
+ }
+ }
+
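+ /* Frequency-based sampling: adjust the period using the time since the last overflow. */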
+ if (counter->attr.freq) {
+ u64 now = sched_clock();
+ s64 delta = now - hwc->freq_stamp;
+
+ hwc->freq_stamp = now;
+
+ if (delta > 0 && delta < TICK_NSEC)
+ perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
+ }
+
+ /*
+ * XXX event_limit might not quite work as expected on inherited
+ * counters
+ */
+
+ counter->pending_kill = POLL_IN;
+ if (events && atomic_dec_and_test(&counter->event_limit)) {
+ ret = 1;
+ counter->pending_kill = POLL_HUP;
+ if (nmi) {
+ counter->pending_disable = 1;
+ perf_pending_queue(&counter->pending,
+ perf_pending_counter);
+ } else
+ perf_counter_disable(counter);
+ }
+
+ perf_counter_output(counter, nmi, data);
+ return ret;
+}
+
+/*
+ * Generic software counter infrastructure
+ */
+
+static void perf_swcounter_update(struct perf_counter *counter)
+{
+ struct hw_perf_counter *hwc = &counter->hw;
+ u64 prev, now;
+ s64 delta;
+
+again:
+ prev = atomic64_read(&hwc->prev_count);
+ now = atomic64_read(&hwc->count);
+ if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
+ goto again;
+
+ delta = now - prev;
+
+ atomic64_add(delta, &counter->count);
+ atomic64_sub(delta, &hwc->period_left);
+}
+
+static void perf_swcounter_set_period(struct perf_counter *counter)
+{
+ struct hw_perf_counter *hwc = &counter->hw;
+ s64 left = atomic64_read(&hwc->period_left);
+ s64 period = hwc->sample_period;
+
+ if (unlikely(left <= -period)) {
+ left = period;
+ atomic64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ }
+
+ if (unlikely(left <= 0)) {
+ left += period;
+ atomic64_add(period, &hwc->period_left);
+ hwc->last_period = period;
+ }
+
+ atomic64_set(&hwc->prev_count, -left);
+ atomic64_set(&hwc->count, -left);
+}
+
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+ enum hrtimer_restart ret = HRTIMER_RESTART;
+ struct perf_sample_data data;
+ struct perf_counter *counter;
+ u64 period;
+
+ counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
+ counter->pmu->read(counter);
+
+ data.addr = 0;
+ data.regs = get_irq_regs();
+ /*
+ * In case we exclude kernel IPs or are somehow not in interrupt
+ * context, provide the next best thing, the user IP.
+ */
+ if ((counter->attr.exclude_kernel || !data.regs) &&
+ !counter->attr.exclude_user)
+ data.regs = task_pt_regs(current);
+
+ if (data.regs) {
+ if (perf_counter_overflow(counter, 0, &data))
+ ret = HRTIMER_NORESTART;
+ }
+
+ period = max_t(u64, 10000, counter->hw.sample_period);
+ hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+ return ret;
+}
+
+static void perf_swcounter_overflow(struct perf_counter *counter,
+ int nmi, struct pt_regs *regs, u64 addr)
+{
+ struct perf_sample_data data = {
+ .regs = regs,
+ .addr = addr,
+ .period = counter->hw.last_period,
+ };
+
+ perf_swcounter_update(counter);
+ perf_swcounter_set_period(counter);
+ if (perf_counter_overflow(counter, nmi, &data))
+ /* soft-disable the counter */
+ ;
+
+}
+
+static int perf_swcounter_is_counting(struct perf_counter *counter)
+{
+ struct perf_counter_context *ctx;
+ unsigned long flags;
+ int count;
+
+ if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+ return 1;
+
+ if (counter->state != PERF_COUNTER_STATE_INACTIVE)
+ return 0;
+
+ /*
+ * If the counter is inactive, it could be just because
+ * its task is scheduled out, or because it's in a group
+ * which could not go on the PMU. We want to count in
+ * the first case but not the second. If the context is
+ * currently active then an inactive software counter must
+ * be the second case. If it's not currently active then
+ * we need to know whether the counter was active when the
+ * context was last active, which we can determine by
+ * comparing counter->tstamp_stopped with ctx->time.
+ *
+ * We are within an RCU read-side critical section,
+ * which protects the existence of *ctx.
+ */
+ ctx = counter->ctx;
+ spin_lock_irqsave(&ctx->lock, flags);
+ count = 1;
+ /* Re-check state now we have the lock */
+ if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
+ counter->ctx->is_active ||
+ counter->tstamp_stopped < ctx->time)
+ count = 0;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return count;
+}
+
+static int perf_swcounter_match(struct perf_counter *counter,
+ enum perf_type_id type,
+ u32 event, struct pt_regs *regs)
+{
+ if (!perf_swcounter_is_counting(counter))
+ return 0;
+
+ if (counter->attr.type != type)
+ return 0;
+ if (counter->attr.config != event)
+ return 0;
+
+ if (regs) {
+ if (counter->attr.exclude_user && user_mode(regs))
+ return 0;
+
+ if (counter->attr.exclude_kernel && !user_mode(regs))
+ return 0;
+ }
+
+ return 1;
+}
+
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+ int nmi, struct pt_regs *regs, u64 addr)
+{
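+ /* hw.count is primed to -period; once the add makes it non-negative, a period has elapsed. */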
+ int neg = atomic64_add_negative(nr, &counter->hw.count);
+
+ if (counter->hw.sample_period && !neg && regs)
+ perf_swcounter_overflow(counter, nmi, regs, addr);
+}
+
+static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
+ enum perf_type_id type, u32 event,
+ u64 nr, int nmi, struct pt_regs *regs,
+ u64 addr)
+{
+ struct perf_counter *counter;
+
+ if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+ if (perf_swcounter_match(counter, type, event, regs))
+ perf_swcounter_add(counter, nr, nmi, regs, addr);
+ }
+ rcu_read_unlock();
+}
+
+static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
+{
+ if (in_nmi())
+ return &cpuctx->recursion[3];
+
+ if (in_irq())
+ return &cpuctx->recursion[2];
+
+ if (in_softirq())
+ return &cpuctx->recursion[1];
+
+ return &cpuctx->recursion[0];
+}
+
+static void __perf_swcounter_event(enum perf_type_id type, u32 event,
+ u64 nr, int nmi, struct pt_regs *regs,
+ u64 addr)
+{
+ struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+ int *recursion = perf_swcounter_recursion_context(cpuctx);
+ struct perf_counter_context *ctx;
+
+ if (*recursion)
+ goto out;
+
+ (*recursion)++;
+ barrier();
+
+ perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
+ nr, nmi, regs, addr);
+ rcu_read_lock();
+ /*
+ * doesn't really matter which of the child contexts the
+ * event ends up in.
+ */
+ ctx = rcu_dereference(current->perf_counter_ctxp);
+ if (ctx)
+ perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
+ rcu_read_unlock();
+
+ barrier();
+ (*recursion)--;
+
+out:
+ put_cpu_var(perf_cpu_context);
+}
+
+void
+perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+ __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
+}
+
+static void perf_swcounter_read(struct perf_counter *counter)
+{
+ perf_swcounter_update(counter);
+}
+
+static int perf_swcounter_enable(struct perf_counter *counter)
+{
+ perf_swcounter_set_period(counter);
+ return 0;
+}
+
+static void perf_swcounter_disable(struct perf_counter *counter)
+{
+ perf_swcounter_update(counter);
+}
+
+static const struct pmu perf_ops_generic = {
+ .enable = perf_swcounter_enable,
+ .disable = perf_swcounter_disable,
+ .read = perf_swcounter_read,
+};
+
+/*
+ * Software counter: cpu wall time clock
+ */
+
+static void cpu_clock_perf_counter_update(struct perf_counter *counter)
+{
+ int cpu = raw_smp_processor_id();
+ s64 prev;
+ u64 now;
+
+ now = cpu_clock(cpu);
+ prev = atomic64_read(&counter->hw.prev_count);
+ atomic64_set(&counter->hw.prev_count, now);
+ atomic64_add(now - prev, &counter->count);
+}
+
+static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
+{
+ struct hw_perf_counter *hwc = &counter->hw;
+ int cpu = raw_smp_processor_id();
+
+ atomic64_set(&hwc->prev_count, cpu_clock(cpu));
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swcounter_hrtimer;
+ if (hwc->sample_period) {
+ u64 period = max_t(u64, 10000, hwc->sample_period);
+ __hrtimer_start_range_ns(&hwc->hrtimer,
+ ns_to_ktime(period), 0,
+ HRTIMER_MODE_REL, 0);
+ }
+
+ return 0;
+}
+
+static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
+{
+ if (counter->hw.sample_period)
+ hrtimer_cancel(&counter->hw.hrtimer);
+ cpu_clock_perf_counter_update(counter);
+}
+
+static void cpu_clock_perf_counter_read(struct perf_counter *counter)
+{
+ cpu_clock_perf_counter_update(counter);
+}
+
+static const struct pmu perf_ops_cpu_clock = {
+ .enable = cpu_clock_perf_counter_enable,
+ .disable = cpu_clock_perf_counter_disable,
+ .read = cpu_clock_perf_counter_read,
+};
+
+/*
+ * Software counter: task time clock
+ */
+
+static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
+{
+ u64 prev;
+ s64 delta;
+
+ prev = atomic64_xchg(&counter->hw.prev_count, now);
+ delta = now - prev;
+ atomic64_add(delta, &counter->count);
+}
+
+static int task_clock_perf_counter_enable(struct perf_counter *counter)
+{
+ struct hw_perf_counter *hwc = &counter->hw;
+ u64 now;
+
+ now = counter->ctx->time;
+
+ atomic64_set(&hwc->prev_count, now);
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swcounter_hrtimer;
+ if (hwc->sample_period) {
+ u64 period = max_t(u64, 10000, hwc->sample_period);
+ __hrtimer_start_range_ns(&hwc->hrtimer,
+ ns_to_ktime(period), 0,
+ HRTIMER_MODE_REL, 0);
+ }
+
+ return 0;
+}
+
+static void task_clock_perf_counter_disable(struct perf_counter *counter)
+{
+ if (counter->hw.sample_period)
+ hrtimer_cancel(&counter->hw.hrtimer);
+ task_clock_perf_counter_update(counter, counter->ctx->time);
+
+}
+
+static void task_clock_perf_counter_read(struct perf_counter *counter)
+{
+ u64 time;
+
+ if (!in_nmi()) {
+ update_context_time(counter->ctx);
+ time = counter->ctx->time;
+ } else {
+ u64 now = perf_clock();
+ u64 delta = now - counter->ctx->timestamp;
+ time = counter->ctx->time + delta;
+ }
+
+ task_clock_perf_counter_update(counter, time);
+}
+
+static const struct pmu perf_ops_task_clock = {
+ .enable = task_clock_perf_counter_enable,
+ .disable = task_clock_perf_counter_disable,
+ .read = task_clock_perf_counter_read,
+};
+
+/*
+ * Software counter: cpu migrations
+ */
+void perf_counter_task_migration(struct task_struct *task, int cpu)
+{
+ struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_counter_context *ctx;
+
+ perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
+ PERF_COUNT_SW_CPU_MIGRATIONS,
+ 1, 1, NULL, 0);
+
+ ctx = perf_pin_task_context(task);
+ if (ctx) {
+ perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
+ PERF_COUNT_SW_CPU_MIGRATIONS,
+ 1, 1, NULL, 0);
+ perf_unpin_context(ctx);
+ }
+}
+
+#ifdef CONFIG_EVENT_PROFILE
+void perf_tpcounter_event(int event_id)
+{
+ struct pt_regs *regs = get_irq_regs();
+
+ if (!regs)
+ regs = task_pt_regs(current);
+
+ __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
+}
+EXPORT_SYMBOL_GPL(perf_tpcounter_event);
+
+extern int ftrace_profile_enable(int);
+extern void ftrace_profile_disable(int);
+
+static void tp_perf_counter_destroy(struct perf_counter *counter)
+{
+ ftrace_profile_disable(perf_event_id(&counter->attr));
+}
+
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
+{
+ int event_id = perf_event_id(&counter->attr);
+ int ret;
+
+ ret = ftrace_profile_enable(event_id);
+ if (ret)
+ return NULL;
+
+ counter->destroy = tp_perf_counter_destroy;
+
+ return &perf_ops_generic;
+}
+#else
+static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
+{
+ return NULL;
+}
+#endif
+
+static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
+{
+ const struct pmu *pmu = NULL;
+
+ /*
+ * Software counters (currently) can't in general distinguish
+ * between user, kernel and hypervisor events.
+ * However, context switches and cpu migrations are considered
+ * to be kernel events, and page faults are never hypervisor
+ * events.
+ */
+ switch (counter->attr.config) {
+ case PERF_COUNT_SW_CPU_CLOCK:
+ pmu = &perf_ops_cpu_clock;
+
+ break;
+ case PERF_COUNT_SW_TASK_CLOCK:
+ /*
+ * If the user instantiates this as a per-cpu counter,
+ * use the cpu_clock counter instead.
+ */
+ if (counter->ctx->task)
+ pmu = &perf_ops_task_clock;
+ else
+ pmu = &perf_ops_cpu_clock;
+
+ break;
+ case PERF_COUNT_SW_PAGE_FAULTS:
+ case PERF_COUNT_SW_PAGE_FAULTS_MIN:
+ case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
+ case PERF_COUNT_SW_CONTEXT_SWITCHES:
+ case PERF_COUNT_SW_CPU_MIGRATIONS:
+ pmu = &perf_ops_generic;
+ break;
+ }
+
+ return pmu;
+}
+
+/*
+ * Allocate and initialize a counter structure
+ */
+static struct perf_counter *
+perf_counter_alloc(struct perf_counter_attr *attr,
+ int cpu,
+ struct perf_counter_context *ctx,
+ struct perf_counter *group_leader,
+ gfp_t gfpflags)
+{
+ const struct pmu *pmu;
+ struct perf_counter *counter;
+ struct hw_perf_counter *hwc;
+ long err;
+
+ counter = kzalloc(sizeof(*counter), gfpflags);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Single counters are their own group leaders, with an
+ * empty sibling list:
+ */
+ if (!group_leader)
+ group_leader = counter;
+
+ mutex_init(&counter->child_mutex);
+ INIT_LIST_HEAD(&counter->child_list);
+
+ INIT_LIST_HEAD(&counter->list_entry);
+ INIT_LIST_HEAD(&counter->event_entry);
+ INIT_LIST_HEAD(&counter->sibling_list);
+ init_waitqueue_head(&counter->waitq);
+
+ mutex_init(&counter->mmap_mutex);
+
+ counter->cpu = cpu;
+ counter->attr = *attr;
+ counter->group_leader = group_leader;
+ counter->pmu = NULL;
+ counter->ctx = ctx;
+ counter->oncpu = -1;
+
+ counter->ns = get_pid_ns(current->nsproxy->pid_ns);
+ counter->id = atomic64_inc_return(&perf_counter_id);
+
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+
+ if (attr->disabled)
+ counter->state = PERF_COUNTER_STATE_OFF;
+
+ pmu = NULL;
+
+ hwc = &counter->hw;
+ hwc->sample_period = attr->sample_period;
+ if (attr->freq && attr->sample_freq)
+ hwc->sample_period = 1;
+
+ atomic64_set(&hwc->period_left, hwc->sample_period);
+
+ /*
+ * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+ */
+ if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+ goto done;
+
+ switch (attr->type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ pmu = hw_perf_counter_init(counter);
+ break;
+
+ case PERF_TYPE_SOFTWARE:
+ pmu = sw_perf_counter_init(counter);
+ break;
+
+ case PERF_TYPE_TRACEPOINT:
+ pmu = tp_perf_counter_init(counter);
+ break;
+
+ default:
+ break;
+ }
+done:
+ err = 0;
+ if (!pmu)
+ err = -EINVAL;
+ else if (IS_ERR(pmu))
+ err = PTR_ERR(pmu);
+
+ if (err) {
+ if (counter->ns)
+ put_pid_ns(counter->ns);
+ kfree(counter);
+ return ERR_PTR(err);
+ }
+
+ counter->pmu = pmu;
+
+ atomic_inc(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_inc(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_inc(&nr_comm_counters);
+
+ return counter;
+}
+
+static int perf_copy_attr(struct perf_counter_attr __user *uattr,
+ struct perf_counter_attr *attr)
+{
+ int ret;
+ u32 size;
+
+ if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
+ return -EFAULT;
+
+ /*
+ * zero the full structure, so that a short copy leaves the remaining fields zeroed.
+ */
+ memset(attr, 0, sizeof(*attr));
+
+ ret = get_user(size, &uattr->size);
+ if (ret)
+ return ret;
+
+ if (size > PAGE_SIZE) /* silly large */
+ goto err_size;
+
+ if (!size) /* abi compat */
+ size = PERF_ATTR_SIZE_VER0;
+
+ if (size < PERF_ATTR_SIZE_VER0)
+ goto err_size;
+
+ /*
+ * If we're handed a bigger struct than we know of,
+ * ensure all the unknown bits are 0.
+ */
+ if (size > sizeof(*attr)) {
+ unsigned long val;
+ unsigned long __user *addr;
+ unsigned long __user *end;
+
+ addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
+ sizeof(unsigned long));
+ end = PTR_ALIGN((void __user *)uattr + size,
+ sizeof(unsigned long));
+
+ for (; addr < end; addr += sizeof(unsigned long)) {
+ ret = get_user(val, addr);
+ if (ret)
+ return ret;
+ if (val)
+ goto err_size;
+ }
+ }
+
+ ret = copy_from_user(attr, uattr, size);
+ if (ret)
+ return -EFAULT;
+
+ /*
+ * If the type exists, the corresponding creation will verify
+ * the attr->config.
+ */
+ if (attr->type >= PERF_TYPE_MAX)
+ return -EINVAL;
+
+ if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
+ return -EINVAL;
+
+ if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
+ return -EINVAL;
+
+ if (attr->read_format & ~(PERF_FORMAT_MAX-1))
+ return -EINVAL;
+
+out:
+ return ret;
+
+err_size:
+ put_user(sizeof(*attr), &uattr->size);
+ ret = -E2BIG;
+ goto out;
+}
+
+/**
+ * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
+ *
+ * @attr_uptr: event type attributes for monitoring/sampling
+ * @pid: target pid
+ * @cpu: target cpu
+ * @group_fd: group leader counter fd
+ */
+SYSCALL_DEFINE5(perf_counter_open,
+ struct perf_counter_attr __user *, attr_uptr,
+ pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
+{
+ struct perf_counter *counter, *group_leader;
+ struct perf_counter_attr attr;
+ struct perf_counter_context *ctx;
+ struct file *counter_file = NULL;
+ struct file *group_file = NULL;
+ int fput_needed = 0;
+ int fput_needed2 = 0;
+ int ret;
+
+ /* for future expandability... */
+ if (flags)
+ return -EINVAL;
+
+ ret = perf_copy_attr(attr_uptr, &attr);
+ if (ret)
+ return ret;
+
+ if (!attr.exclude_kernel) {
+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ }
+
+ if (attr.freq) {
+ if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+ return -EINVAL;
+ }
+
+ /*
+ * Get the target context (task or percpu):
+ */
+ ctx = find_get_context(pid, cpu);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ /*
+ * Look up the group leader (we will attach this counter to it):
+ */
+ group_leader = NULL;
+ if (group_fd != -1) {
+ ret = -EINVAL;
+ group_file = fget_light(group_fd, &fput_needed);
+ if (!group_file)
+ goto err_put_context;
+ if (group_file->f_op != &perf_fops)
+ goto err_put_context;
+
+ group_leader = group_file->private_data;
+ /*
+ * Do not allow a recursive hierarchy (this new sibling
+ * becoming part of another group-sibling):
+ */
+ if (group_leader->group_leader != group_leader)
+ goto err_put_context;
+ /*
+ * Do not allow attaching to a group in a different
+ * task or CPU context:
+ */
+ if (group_leader->ctx != ctx)
+ goto err_put_context;
+ /*
+ * Only a group leader can be exclusive or pinned
+ */
+ if (attr.exclusive || attr.pinned)
+ goto err_put_context;
+ }
+
+ counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
+ GFP_KERNEL);
+ ret = PTR_ERR(counter);
+ if (IS_ERR(counter))
+ goto err_put_context;
+
+ ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
+ if (ret < 0)
+ goto err_free_put_context;
+
+ counter_file = fget_light(ret, &fput_needed2);
+ if (!counter_file)
+ goto err_free_put_context;
+
+ counter->filp = counter_file;
+ WARN_ON_ONCE(ctx->parent_ctx);
+ mutex_lock(&ctx->mutex);
+ perf_install_in_context(ctx, counter, cpu);
+ ++ctx->generation;
+ mutex_unlock(&ctx->mutex);
+
+ counter->owner = current;
+ get_task_struct(current);
+ mutex_lock(&current->perf_counter_mutex);
+ list_add_tail(&counter->owner_entry, &current->perf_counter_list);
+ mutex_unlock(&current->perf_counter_mutex);
+
+ fput_light(counter_file, fput_needed2);
+
+out_fput:
+ fput_light(group_file, fput_needed);
+
+ return ret;
+
+err_free_put_context:
+ kfree(counter);
+
+err_put_context:
+ put_ctx(ctx);
+
+ goto out_fput;
+}
+
+/*
+ * inherit a counter from parent task to child task:
+ */
+static struct perf_counter *
+inherit_counter(struct perf_counter *parent_counter,
+ struct task_struct *parent,
+ struct perf_counter_context *parent_ctx,
+ struct task_struct *child,
+ struct perf_counter *group_leader,
+ struct perf_counter_context *child_ctx)
+{
+ struct perf_counter *child_counter;
+
+ /*
+ * Instead of creating recursive hierarchies of counters,
+ * we link inherited counters back to the original parent,
+ * which is guaranteed to have a filp that we use as the
+ * reference count:
+ */
+ if (parent_counter->parent)
+ parent_counter = parent_counter->parent;
+
+ child_counter = perf_counter_alloc(&parent_counter->attr,
+ parent_counter->cpu, child_ctx,
+ group_leader, GFP_KERNEL);
+ if (IS_ERR(child_counter))
+ return child_counter;
+ get_ctx(child_ctx);
+
+ /*
+ * Make the child state follow the state of the parent counter,
+ * not its attr.disabled bit. We hold the parent's mutex,
+ * so we won't race with perf_counter_{en, dis}able_family.
+ */
+ if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ child_counter->state = PERF_COUNTER_STATE_INACTIVE;
+ else
+ child_counter->state = PERF_COUNTER_STATE_OFF;
+
+ if (parent_counter->attr.freq)
+ child_counter->hw.sample_period = parent_counter->hw.sample_period;
+
+ /*
+ * Link it up in the child's context:
+ */
+ add_counter_to_ctx(child_counter, child_ctx);
+
+ child_counter->parent = parent_counter;
+ /*
+ * inherit into child's child as well:
+ */
+ child_counter->attr.inherit = 1;
+
+ /*
+ * Get a reference to the parent filp - we will fput it
+ * when the child counter exits. This is safe to do because
+ * we are in the parent and we know that the filp still
+ * exists and has a nonzero count:
+ */
+ atomic_long_inc(&parent_counter->filp->f_count);
+
+ /*
+ * Link this into the parent counter's child list
+ */
+ WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
+ mutex_lock(&parent_counter->child_mutex);
+ list_add_tail(&child_counter->child_list, &parent_counter->child_list);
+ mutex_unlock(&parent_counter->child_mutex);
+
+ return child_counter;
+}
+
+static int inherit_group(struct perf_counter *parent_counter,
+ struct task_struct *parent,
+ struct perf_counter_context *parent_ctx,
+ struct task_struct *child,
+ struct perf_counter_context *child_ctx)
+{
+ struct perf_counter *leader;
+ struct perf_counter *sub;
+ struct perf_counter *child_ctr;
+
+ leader = inherit_counter(parent_counter, parent, parent_ctx,
+ child, NULL, child_ctx);
+ if (IS_ERR(leader))
+ return PTR_ERR(leader);
+ list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
+ child_ctr = inherit_counter(sub, parent, parent_ctx,
+ child, leader, child_ctx);
+ if (IS_ERR(child_ctr))
+ return PTR_ERR(child_ctr);
+ }
+ return 0;
+}
+
+static void sync_child_counter(struct perf_counter *child_counter,
+ struct perf_counter *parent_counter)
+{
+ u64 child_val;
+
+ child_val = atomic64_read(&child_counter->count);
+
+ /*
+ * Add back the child's count to the parent's count:
+ */
+ atomic64_add(child_val, &parent_counter->count);
+ atomic64_add(child_counter->total_time_enabled,
+ &parent_counter->child_total_time_enabled);
+ atomic64_add(child_counter->total_time_running,
+ &parent_counter->child_total_time_running);
+
+ /*
+ * Remove this counter from the parent's list
+ */
+ WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
+ mutex_lock(&parent_counter->child_mutex);
+ list_del_init(&child_counter->child_list);
+ mutex_unlock(&parent_counter->child_mutex);
+
+ /*
+ * Release the parent counter, if this was the last
+ * reference to it.
+ */
+ fput(parent_counter->filp);
+}
+
+static void
+__perf_counter_exit_task(struct perf_counter *child_counter,
+ struct perf_counter_context *child_ctx)
+{
+ struct perf_counter *parent_counter;
+
+ update_counter_times(child_counter);
+ perf_counter_remove_from_context(child_counter);
+
+ parent_counter = child_counter->parent;
+ /*
+ * It can happen that the parent exits first, and has counters
+ * that are still around due to the child reference. These
+ * counters need to be zapped - but otherwise linger.
+ */
+ if (parent_counter) {
+ sync_child_counter(child_counter, parent_counter);
+ free_counter(child_counter);
+ }
+}
+
+/*
+ * When a child task exits, feed back counter values to parent counters.
+ */
+void perf_counter_exit_task(struct task_struct *child)
+{
+ struct perf_counter *child_counter, *tmp;
+ struct perf_counter_context *child_ctx;
+ unsigned long flags;
+
+ if (likely(!child->perf_counter_ctxp))
+ return;
+
+ local_irq_save(flags);
+ /*
+ * We can't reschedule here because interrupts are disabled,
+ * and either child is current or it is a task that can't be
+ * scheduled, so we are now safe from rescheduling changing
+ * our context.
+ */
+ child_ctx = child->perf_counter_ctxp;
+ __perf_counter_task_sched_out(child_ctx);
+
+ /*
+ * Take the context lock here so that if find_get_context is
+ * reading child->perf_counter_ctxp, we wait until it has
+ * incremented the context's refcount before we do put_ctx below.
+ */
+ spin_lock(&child_ctx->lock);
+ child->perf_counter_ctxp = NULL;
+ if (child_ctx->parent_ctx) {
+ /*
+ * This context is a clone; unclone it so it can't get
+ * swapped to another process while we're removing all
+ * the counters from it.
+ */
+ put_ctx(child_ctx->parent_ctx);
+ child_ctx->parent_ctx = NULL;
+ }
+ spin_unlock(&child_ctx->lock);
+ local_irq_restore(flags);
+
+ /*
+ * We can recurse on the same lock type through:
+ *
+ * __perf_counter_exit_task()
+ * sync_child_counter()
+ * fput(parent_counter->filp)
+ * perf_release()
+ * mutex_lock(&ctx->mutex)
+ *
+ * But since it's the parent context it won't be the same instance.
+ */
+ mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
+
+again:
+ list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
+ list_entry)
+ __perf_counter_exit_task(child_counter, child_ctx);
+
+ /*
+ * If the last counter was a group counter, it will have appended all
+ * its siblings to the list, but we obtained 'tmp' before that, so it
+ * will still point to the list head, terminating the iteration.
+ */
+ if (!list_empty(&child_ctx->counter_list))
+ goto again;
+
+ mutex_unlock(&child_ctx->mutex);
+
+ put_ctx(child_ctx);
+}
+
+/*
+ * Free an unexposed, unused context as created by inheritance by
+ * perf_counter_init_task() below; used by fork() in case of failure.
+ */
+void perf_counter_free_task(struct task_struct *task)
+{
+ struct perf_counter_context *ctx = task->perf_counter_ctxp;
+ struct perf_counter *counter, *tmp;
+
+ if (!ctx)
+ return;
+
+ mutex_lock(&ctx->mutex);
+again:
+ list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
+ struct perf_counter *parent = counter->parent;
+
+ if (WARN_ON_ONCE(!parent))
+ continue;
+
+ mutex_lock(&parent->child_mutex);
+ list_del_init(&counter->child_list);
+ mutex_unlock(&parent->child_mutex);
+
+ fput(parent->filp);
+
+ list_del_counter(counter, ctx);
+ free_counter(counter);
+ }
+
+ if (!list_empty(&ctx->counter_list))
+ goto again;
+
+ mutex_unlock(&ctx->mutex);
+
+ put_ctx(ctx);
+}
+
+/*
+ * Initialize the perf_counter context in task_struct
+ */
+int perf_counter_init_task(struct task_struct *child)
+{
+ struct perf_counter_context *child_ctx, *parent_ctx;
+ struct perf_counter_context *cloned_ctx;
+ struct perf_counter *counter;
+ struct task_struct *parent = current;
+ int inherited_all = 1;
+ int ret = 0;
+
+ child->perf_counter_ctxp = NULL;
+
+ mutex_init(&child->perf_counter_mutex);
+ INIT_LIST_HEAD(&child->perf_counter_list);
+
+ if (likely(!parent->perf_counter_ctxp))
+ return 0;
+
+ /*
+ * This is executed from the parent task context, so inherit
+ * counters that have been marked for cloning.
+ * First allocate and initialize a context for the child.
+ */
+
+ child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
+ if (!child_ctx)
+ return -ENOMEM;
+
+ __perf_counter_init_context(child_ctx, child);
+ child->perf_counter_ctxp = child_ctx;
+ get_task_struct(child);
+
+ /*
+ * If the parent's context is a clone, pin it so it won't get
+ * swapped under us.
+ */
+ parent_ctx = perf_pin_task_context(parent);
+
+ /*
+ * No need to check if parent_ctx != NULL here; since we saw
+ * it non-NULL earlier, the only reason for it to become NULL
+ * is if we exit, and since we're currently in the middle of
+ * a fork we can't be exiting at the same time.
+ */
+
+ /*
+ * Lock the parent list. No need to lock the child - not PID
+ * hashed yet and not running, so nobody can access it.
+ */
+ mutex_lock(&parent_ctx->mutex);
+
+ /*
+ * We don't have to disable NMIs - we are only looking at
+ * the list, not manipulating it:
+ */
+ list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
+ if (counter != counter->group_leader)
+ continue;
+
+ if (!counter->attr.inherit) {
+ inherited_all = 0;
+ continue;
+ }
+
+ ret = inherit_group(counter, parent, parent_ctx,
+ child, child_ctx);
+ if (ret) {
+ inherited_all = 0;
+ break;
+ }
+ }
+
+ if (inherited_all) {
+ /*
+ * Mark the child context as a clone of the parent
+ * context, or of whatever the parent is a clone of.
+ * Note that if the parent is a clone, it could get
+ * uncloned at any point, but that doesn't matter
+ * because the list of counters and the generation
+ * count can't have changed since we took the mutex.
+ */
+ cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+ if (cloned_ctx) {
+ child_ctx->parent_ctx = cloned_ctx;
+ child_ctx->parent_gen = parent_ctx->parent_gen;
+ } else {
+ child_ctx->parent_ctx = parent_ctx;
+ child_ctx->parent_gen = parent_ctx->generation;
+ }
+ get_ctx(child_ctx->parent_ctx);
+ }
+
+ mutex_unlock(&parent_ctx->mutex);
+
+ perf_unpin_context(parent_ctx);
+
+ return ret;
+}
+
+static void __cpuinit perf_counter_init_cpu(int cpu)
+{
+ struct perf_cpu_context *cpuctx;
+
+ cpuctx = &per_cpu(perf_cpu_context, cpu);
+ __perf_counter_init_context(&cpuctx->ctx, NULL);
+
+ spin_lock(&perf_resource_lock);
+ cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
+ spin_unlock(&perf_resource_lock);
+
+ hw_perf_counter_setup(cpu);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void __perf_counter_exit_cpu(void *info)
+{
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+ struct perf_counter_context *ctx = &cpuctx->ctx;
+ struct perf_counter *counter, *tmp;
+
+ list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
+ __perf_counter_remove_from_context(counter);
+}
+static void perf_counter_exit_cpu(int cpu)
+{
+ struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_counter_context *ctx = &cpuctx->ctx;
+
+ mutex_lock(&ctx->mutex);
+ smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
+ mutex_unlock(&ctx->mutex);
+}
+#else
+static inline void perf_counter_exit_cpu(int cpu) { }
+#endif
+
+static int __cpuinit
+perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action) {
+
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ perf_counter_init_cpu(cpu);
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ perf_counter_exit_cpu(cpu);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
+static struct notifier_block __cpuinitdata perf_cpu_nb = {
+ .notifier_call = perf_cpu_notify,
+ .priority = 20,
+};
+
+void __init perf_counter_init(void)
+{
+ perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ register_cpu_notifier(&perf_cpu_nb);
+}
+
+static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
+{
+ return sprintf(buf, "%d\n", perf_reserved_percpu);
+}
+
+static ssize_t
+perf_set_reserve_percpu(struct sysdev_class *class,
+ const char *buf,
+ size_t count)
+{
+ struct perf_cpu_context *cpuctx;
+ unsigned long val;
+ int err, cpu, mpt;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val > perf_max_counters)
+ return -EINVAL;
+
+ spin_lock(&perf_resource_lock);
+ perf_reserved_percpu = val;
+ for_each_online_cpu(cpu) {
+ cpuctx = &per_cpu(perf_cpu_context, cpu);
+ spin_lock_irq(&cpuctx->ctx.lock);
+ mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
+ perf_max_counters - perf_reserved_percpu);
+ cpuctx->max_pertask = mpt;
+ spin_unlock_irq(&cpuctx->ctx.lock);
+ }
+ spin_unlock(&perf_resource_lock);
+
+ return count;
+}
+
+static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
+{
+ return sprintf(buf, "%d\n", perf_overcommit);
+}
+
+static ssize_t
+perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
+{
+ unsigned long val;
+ int err;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val > 1)
+ return -EINVAL;
+
+ spin_lock(&perf_resource_lock);
+ perf_overcommit = val;
+ spin_unlock(&perf_resource_lock);
+
+ return count;
+}
+
+static SYSDEV_CLASS_ATTR(
+ reserve_percpu,
+ 0644,
+ perf_show_reserve_percpu,
+ perf_set_reserve_percpu
+ );
+
+static SYSDEV_CLASS_ATTR(
+ overcommit,
+ 0644,
+ perf_show_overcommit,
+ perf_set_overcommit
+ );
+
+static struct attribute *perfclass_attrs[] = {
+ &attr_reserve_percpu.attr,
+ &attr_overcommit.attr,
+ NULL
+};
+
+static struct attribute_group perfclass_attr_group = {
+ .attrs = perfclass_attrs,
+ .name = "perf_counters",
+};
+
+static int __init perf_counter_sysfs_init(void)
+{
+ return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
+ &perfclass_attr_group);
+}
+device_initcall(perf_counter_sysfs_init);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 23bd4daeb96b..72067cbdb37f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -116,9 +116,13 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
+config HIBERNATION_NVS
+ bool
+
config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
+ select HIBERNATION_NVS if HAS_IOMEM
---help---
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 720ea4f781bd..c3b81c30e5d5 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -6,6 +6,9 @@ endif
obj-$(CONFIG_PM) += main.o
obj-$(CONFIG_PM_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
-obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o
+obj-$(CONFIG_SUSPEND) += suspend.o
+obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o
+obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/hibernate.c
index e71ca9cd81b2..81d2e7464893 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/hibernate.c
@@ -1,12 +1,12 @@
/*
- * kernel/power/disk.c - Suspend-to-disk support.
+ * kernel/power/hibernate.c - Hibernation (a.k.a. suspend-to-disk) support.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
*
* This file is released under the GPLv2.
- *
*/
#include <linux/suspend.h>
@@ -215,19 +215,17 @@ static int create_image(int platform_mode)
if (error)
return error;
- device_pm_lock();
-
- /* At this point, device_suspend() has been called, but *not*
- * device_power_down(). We *must* call device_power_down() now.
+ /* At this point, dpm_suspend_start() has been called, but *not*
+ * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
* Otherwise, drivers for some devices (e.g. interrupt controllers)
* become desynchronized with the actual state of the hardware
* at resume time, and evil weirdness ensues.
*/
- error = device_power_down(PMSG_FREEZE);
+ error = dpm_suspend_noirq(PMSG_FREEZE);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting hibernation\n");
- goto Unlock;
+ return error;
}
error = platform_pre_snapshot(platform_mode);
@@ -241,9 +239,9 @@ static int create_image(int platform_mode)
local_irq_disable();
- sysdev_suspend(PMSG_FREEZE);
+ error = sysdev_suspend(PMSG_FREEZE);
if (error) {
- printk(KERN_ERR "PM: Some devices failed to power down, "
+ printk(KERN_ERR "PM: Some system devices failed to power down, "
"aborting hibernation\n");
goto Enable_irqs;
}
@@ -264,7 +262,7 @@ static int create_image(int platform_mode)
Power_up:
sysdev_resume();
- /* NOTE: device_power_up() is just a resume() for devices
+ /* NOTE: dpm_resume_noirq() is just a resume() for devices
* that suspended with irqs off ... no overall powerup.
*/
@@ -277,12 +275,9 @@ static int create_image(int platform_mode)
Platform_finish:
platform_finish(platform_mode);
- device_power_up(in_suspend ?
+ dpm_resume_noirq(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
- Unlock:
- device_pm_unlock();
-
return error;
}
@@ -309,7 +304,7 @@ int hibernation_snapshot(int platform_mode)
goto Close;
suspend_console();
- error = device_suspend(PMSG_FREEZE);
+ error = dpm_suspend_start(PMSG_FREEZE);
if (error)
goto Recover_platform;
@@ -320,7 +315,7 @@ int hibernation_snapshot(int platform_mode)
/* Control returns here after successful restore */
Resume_devices:
- device_resume(in_suspend ?
+ dpm_resume_end(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
resume_console();
Close:
@@ -344,13 +339,11 @@ static int resume_target_kernel(bool platform_mode)
{
int error;
- device_pm_lock();
-
- error = device_power_down(PMSG_QUIESCE);
+ error = dpm_suspend_noirq(PMSG_QUIESCE);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting resume\n");
- goto Unlock;
+ return error;
}
error = platform_pre_restore(platform_mode);
@@ -401,10 +394,7 @@ static int resume_target_kernel(bool platform_mode)
Cleanup:
platform_restore_cleanup(platform_mode);
- device_power_up(PMSG_RECOVER);
-
- Unlock:
- device_pm_unlock();
+ dpm_resume_noirq(PMSG_RECOVER);
return error;
}
@@ -424,10 +414,10 @@ int hibernation_restore(int platform_mode)
pm_prepare_console();
suspend_console();
- error = device_suspend(PMSG_QUIESCE);
+ error = dpm_suspend_start(PMSG_QUIESCE);
if (!error) {
error = resume_target_kernel(platform_mode);
- device_resume(PMSG_RECOVER);
+ dpm_resume_end(PMSG_RECOVER);
}
resume_console();
pm_restore_console();
@@ -457,18 +447,16 @@ int hibernation_platform_enter(void)
entering_platform_hibernation = true;
suspend_console();
- error = device_suspend(PMSG_HIBERNATE);
+ error = dpm_suspend_start(PMSG_HIBERNATE);
if (error) {
if (hibernation_ops->recover)
hibernation_ops->recover();
goto Resume_devices;
}
- device_pm_lock();
-
- error = device_power_down(PMSG_HIBERNATE);
+ error = dpm_suspend_noirq(PMSG_HIBERNATE);
if (error)
- goto Unlock;
+ goto Resume_devices;
error = hibernation_ops->prepare();
if (error)
@@ -491,14 +479,11 @@ int hibernation_platform_enter(void)
Platofrm_finish:
hibernation_ops->finish();
- device_power_up(PMSG_RESTORE);
-
- Unlock:
- device_pm_unlock();
+ dpm_resume_noirq(PMSG_RESTORE);
Resume_devices:
entering_platform_hibernation = false;
- device_resume(PMSG_RESTORE);
+ dpm_resume_end(PMSG_RESTORE);
resume_console();
Close:
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/hibernate_nvs.c
new file mode 100644
index 000000000000..39ac698ef836
--- /dev/null
+++ b/kernel/power/hibernate_nvs.c
@@ -0,0 +1,135 @@
+/*
+ * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
+ *
+ * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/suspend.h>
+
+/*
+ * Platforms, like ACPI, may want us to save some memory used by them during
+ * hibernation and to restore the contents of this memory during the subsequent
+ * resume. The code below implements a mechanism allowing us to do that.
+ */
+
+struct nvs_page {
+ unsigned long phys_start;
+ unsigned int size;
+ void *kaddr;
+ void *data;
+ struct list_head node;
+};
+
+static LIST_HEAD(nvs_list);
+
+/**
+ * hibernate_nvs_register - register platform NVS memory region to save
+ * @start - physical address of the region
+ * @size - size of the region
+ *
+ * The NVS region need not be page-aligned (at either end); it is split at
+ * page boundaries so that each resulting chunk fits within a single page
+ * and is copied into its own RAM page.
+ */
+int hibernate_nvs_register(unsigned long start, unsigned long size)
+{
+ struct nvs_page *entry, *next;
+
+ while (size > 0) {
+ unsigned int nr_bytes;
+
+ entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
+ if (!entry)
+ goto Error;
+
+ list_add_tail(&entry->node, &nvs_list);
+ entry->phys_start = start;
+ nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
+ entry->size = (size < nr_bytes) ? size : nr_bytes;
+
+ start += entry->size;
+ size -= entry->size;
+ }
+ return 0;
+
+ Error:
+ list_for_each_entry_safe(entry, next, &nvs_list, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ return -ENOMEM;
+}
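Because nr_bytes is clamped to the distance to the next page boundary, a registered region never produces an entry that crosses a page. A worked example, assuming 4 KiB pages:

        hibernate_nvs_register(0xfff0, 48);
        /* 1st entry: phys_start = 0xfff0,  size = 16 (up to the page boundary) */
        /* 2nd entry: phys_start = 0x10000, size = 32                           */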
+
+/**
+ * hibernate_nvs_free - free data pages allocated for saving NVS regions
+ */
+void hibernate_nvs_free(void)
+{
+ struct nvs_page *entry;
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data) {
+ free_page((unsigned long)entry->data);
+ entry->data = NULL;
+ if (entry->kaddr) {
+ iounmap(entry->kaddr);
+ entry->kaddr = NULL;
+ }
+ }
+}
+
+/**
+ * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
+ */
+int hibernate_nvs_alloc(void)
+{
+ struct nvs_page *entry;
+
+ list_for_each_entry(entry, &nvs_list, node) {
+ entry->data = (void *)__get_free_page(GFP_KERNEL);
+ if (!entry->data) {
+ hibernate_nvs_free();
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+/**
+ * hibernate_nvs_save - save NVS memory regions
+ */
+void hibernate_nvs_save(void)
+{
+ struct nvs_page *entry;
+
+ printk(KERN_INFO "PM: Saving platform NVS memory\n");
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data) {
+ entry->kaddr = ioremap(entry->phys_start, entry->size);
+ memcpy(entry->data, entry->kaddr, entry->size);
+ }
+}
+
+/**
+ * hibernate_nvs_restore - restore NVS memory regions
+ *
+ * This function is going to be called with interrupts disabled, so it
+ * cannot iounmap the virtual addresses used to access the NVS region.
+ */
+void hibernate_nvs_restore(void)
+{
+ struct nvs_page *entry;
+
+ printk(KERN_INFO "PM: Restoring platform NVS memory\n");
+
+ list_for_each_entry(entry, &nvs_list, node)
+ if (entry->data)
+ memcpy(entry->kaddr, entry->data, entry->size);
+}
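Taken together, the four helpers are meant to be driven by platform code (ACPI is the motivating user) across a hibernation cycle: register the firmware regions once, allocate and save before the image is created, restore early in the resume path while interrupts are still off, and free afterwards. The sketch below only illustrates that ordering; the function names and call sites are assumptions, not part of this patch:

        /* illustrative platform glue, not taken from this patch */
        static unsigned long example_nvs_start; /* e.g. parsed from firmware tables */
        static unsigned long example_nvs_size;

        static int __init example_platform_init(void)
        {
                return hibernate_nvs_register(example_nvs_start, example_nvs_size);
        }

        static int example_pre_snapshot(void)
        {
                int error = hibernate_nvs_alloc();      /* one RAM page per chunk */

                if (!error)
                        hibernate_nvs_save();           /* copy NVS into those pages */
                return error;
        }

        static void example_restore_early(void)         /* resume path, irqs still off */
        {
                hibernate_nvs_restore();
        }

        static void example_cleanup(void)               /* later, with irqs back on */
        {
                hibernate_nvs_free();                   /* drops the pages and iounmaps */
        }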
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f99ed6a75eac..f710e36930cc 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -8,20 +8,9 @@
*
*/
-#include <linux/module.h>
-#include <linux/suspend.h>
#include <linux/kobject.h>
#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/kmod.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/cpu.h>
#include <linux/resume-trace.h>
-#include <linux/freezer.h>
-#include <linux/vmstat.h>
-#include <linux/syscalls.h>
#include "power.h"
@@ -119,378 +108,6 @@ power_attr(pm_test);
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_SUSPEND
-
-static int suspend_test(int level)
-{
-#ifdef CONFIG_PM_DEBUG
- if (pm_test_level == level) {
- printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
- mdelay(5000);
- return 1;
- }
-#endif /* !CONFIG_PM_DEBUG */
- return 0;
-}
-
-#ifdef CONFIG_PM_TEST_SUSPEND
-
-/*
- * We test the system suspend code by setting an RTC wakealarm a short
- * time in the future, then suspending. Suspending the devices won't
- * normally take long ... some systems only need a few milliseconds.
- *
- * The time it takes is system-specific though, so when we test this
- * during system bootup we allow a LOT of time.
- */
-#define TEST_SUSPEND_SECONDS 5
-
-static unsigned long suspend_test_start_time;
-
-static void suspend_test_start(void)
-{
- /* FIXME Use better timebase than "jiffies", ideally a clocksource.
- * What we want is a hardware counter that will work correctly even
- * during the irqs-are-off stages of the suspend/resume cycle...
- */
- suspend_test_start_time = jiffies;
-}
-
-static void suspend_test_finish(const char *label)
-{
- long nj = jiffies - suspend_test_start_time;
- unsigned msec;
-
- msec = jiffies_to_msecs(abs(nj));
- pr_info("PM: %s took %d.%03d seconds\n", label,
- msec / 1000, msec % 1000);
-
- /* Warning on suspend means the RTC alarm period needs to be
- * larger -- the system was sooo slooowwww to suspend that the
- * alarm (should have) fired before the system went to sleep!
- *
- * Warning on either suspend or resume also means the system
- * has some performance issues. The stack dump of a WARN_ON
- * is more likely to get the right attention than a printk...
- */
- WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
-}
-
-#else
-
-static void suspend_test_start(void)
-{
-}
-
-static void suspend_test_finish(const char *label)
-{
-}
-
-#endif
-
-/* This is just an arbitrary number */
-#define FREE_PAGE_NUMBER (100)
-
-static struct platform_suspend_ops *suspend_ops;
-
-/**
- * suspend_set_ops - Set the global suspend method table.
- * @ops: Pointer to ops structure.
- */
-
-void suspend_set_ops(struct platform_suspend_ops *ops)
-{
- mutex_lock(&pm_mutex);
- suspend_ops = ops;
- mutex_unlock(&pm_mutex);
-}
-
-/**
- * suspend_valid_only_mem - generic memory-only valid callback
- *
- * Platform drivers that implement mem suspend only and only need
- * to check for that in their .valid callback can use this instead
- * of rolling their own .valid callback.
- */
-int suspend_valid_only_mem(suspend_state_t state)
-{
- return state == PM_SUSPEND_MEM;
-}
-
-/**
- * suspend_prepare - Do prep work before entering low-power state.
- *
- * This is common code that is called for each state that we're entering.
- * Run suspend notifiers, allocate a console and stop all processes.
- */
-static int suspend_prepare(void)
-{
- int error;
- unsigned int free_pages;
-
- if (!suspend_ops || !suspend_ops->enter)
- return -EPERM;
-
- pm_prepare_console();
-
- error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
- if (error)
- goto Finish;
-
- error = usermodehelper_disable();
- if (error)
- goto Finish;
-
- if (suspend_freeze_processes()) {
- error = -EAGAIN;
- goto Thaw;
- }
-
- free_pages = global_page_state(NR_FREE_PAGES);
- if (free_pages < FREE_PAGE_NUMBER) {
- pr_debug("PM: free some memory\n");
- shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
- if (nr_free_pages() < FREE_PAGE_NUMBER) {
- error = -ENOMEM;
- printk(KERN_ERR "PM: No enough memory\n");
- }
- }
- if (!error)
- return 0;
-
- Thaw:
- suspend_thaw_processes();
- usermodehelper_enable();
- Finish:
- pm_notifier_call_chain(PM_POST_SUSPEND);
- pm_restore_console();
- return error;
-}
-
-/* default implementation */
-void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
-{
- local_irq_disable();
-}
-
-/* default implementation */
-void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
-{
- local_irq_enable();
-}
-
-/**
- * suspend_enter - enter the desired system sleep state.
- * @state: state to enter
- *
- * This function should be called after devices have been suspended.
- */
-static int suspend_enter(suspend_state_t state)
-{
- int error;
-
- device_pm_lock();
-
- if (suspend_ops->prepare) {
- error = suspend_ops->prepare();
- if (error)
- goto Done;
- }
-
- error = device_power_down(PMSG_SUSPEND);
- if (error) {
- printk(KERN_ERR "PM: Some devices failed to power down\n");
- goto Platfrom_finish;
- }
-
- if (suspend_ops->prepare_late) {
- error = suspend_ops->prepare_late();
- if (error)
- goto Power_up_devices;
- }
-
- if (suspend_test(TEST_PLATFORM))
- goto Platform_wake;
-
- error = disable_nonboot_cpus();
- if (error || suspend_test(TEST_CPUS))
- goto Enable_cpus;
-
- arch_suspend_disable_irqs();
- BUG_ON(!irqs_disabled());
-
- error = sysdev_suspend(PMSG_SUSPEND);
- if (!error) {
- if (!suspend_test(TEST_CORE))
- error = suspend_ops->enter(state);
- sysdev_resume();
- }
-
- arch_suspend_enable_irqs();
- BUG_ON(irqs_disabled());
-
- Enable_cpus:
- enable_nonboot_cpus();
-
- Platform_wake:
- if (suspend_ops->wake)
- suspend_ops->wake();
-
- Power_up_devices:
- device_power_up(PMSG_RESUME);
-
- Platfrom_finish:
- if (suspend_ops->finish)
- suspend_ops->finish();
-
- Done:
- device_pm_unlock();
-
- return error;
-}
-
-/**
- * suspend_devices_and_enter - suspend devices and enter the desired system
- * sleep state.
- * @state: state to enter
- */
-int suspend_devices_and_enter(suspend_state_t state)
-{
- int error;
-
- if (!suspend_ops)
- return -ENOSYS;
-
- if (suspend_ops->begin) {
- error = suspend_ops->begin(state);
- if (error)
- goto Close;
- }
- suspend_console();
- suspend_test_start();
- error = device_suspend(PMSG_SUSPEND);
- if (error) {
- printk(KERN_ERR "PM: Some devices failed to suspend\n");
- goto Recover_platform;
- }
- suspend_test_finish("suspend devices");
- if (suspend_test(TEST_DEVICES))
- goto Recover_platform;
-
- suspend_enter(state);
-
- Resume_devices:
- suspend_test_start();
- device_resume(PMSG_RESUME);
- suspend_test_finish("resume devices");
- resume_console();
- Close:
- if (suspend_ops->end)
- suspend_ops->end();
- return error;
-
- Recover_platform:
- if (suspend_ops->recover)
- suspend_ops->recover();
- goto Resume_devices;
-}
-
-/**
- * suspend_finish - Do final work before exiting suspend sequence.
- *
- * Call platform code to clean up, restart processes, and free the
- * console that we've allocated. This is not called for suspend-to-disk.
- */
-static void suspend_finish(void)
-{
- suspend_thaw_processes();
- usermodehelper_enable();
- pm_notifier_call_chain(PM_POST_SUSPEND);
- pm_restore_console();
-}
-
-
-
-
-static const char * const pm_states[PM_SUSPEND_MAX] = {
- [PM_SUSPEND_STANDBY] = "standby",
- [PM_SUSPEND_MEM] = "mem",
-};
-
-static inline int valid_state(suspend_state_t state)
-{
- /* All states need lowlevel support and need to be valid
- * to the lowlevel implementation, no valid callback
- * implies that none are valid. */
- if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
- return 0;
- return 1;
-}
-
-
-/**
- * enter_state - Do common work of entering low-power state.
- * @state: pm_state structure for state we're entering.
- *
- * Make sure we're the only ones trying to enter a sleep state. Fail
- * if someone has beat us to it, since we don't want anything weird to
- * happen when we wake up.
- * Then, do the setup for suspend, enter the state, and cleaup (after
- * we've woken up).
- */
-static int enter_state(suspend_state_t state)
-{
- int error;
-
- if (!valid_state(state))
- return -ENODEV;
-
- if (!mutex_trylock(&pm_mutex))
- return -EBUSY;
-
- printk(KERN_INFO "PM: Syncing filesystems ... ");
- sys_sync();
- printk("done.\n");
-
- pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
- error = suspend_prepare();
- if (error)
- goto Unlock;
-
- if (suspend_test(TEST_FREEZER))
- goto Finish;
-
- pr_debug("PM: Entering %s sleep\n", pm_states[state]);
- error = suspend_devices_and_enter(state);
-
- Finish:
- pr_debug("PM: Finishing wakeup.\n");
- suspend_finish();
- Unlock:
- mutex_unlock(&pm_mutex);
- return error;
-}
-
-
-/**
- * pm_suspend - Externally visible function for suspending system.
- * @state: Enumerated value of state to enter.
- *
- * Determine whether or not value is within range, get state
- * structure, and enter (above).
- */
-
-int pm_suspend(suspend_state_t state)
-{
- if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
- return enter_state(state);
- return -EINVAL;
-}
-
-EXPORT_SYMBOL(pm_suspend);
-
-#endif /* CONFIG_SUSPEND */
-
struct kobject *power_kobj;
/**
@@ -503,7 +120,6 @@ struct kobject *power_kobj;
* store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
*/
-
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
@@ -601,7 +217,6 @@ static struct attribute_group attr_group = {
.attrs = g,
};
-
static int __init pm_init(void)
{
power_kobj = kobject_create_and_add("power", NULL);
@@ -611,144 +226,3 @@ static int __init pm_init(void)
}
core_initcall(pm_init);
-
-
-#ifdef CONFIG_PM_TEST_SUSPEND
-
-#include <linux/rtc.h>
-
-/*
- * To test system suspend, we need a hands-off mechanism to resume the
- * system. RTCs wake alarms are a common self-contained mechanism.
- */
-
-static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
-{
- static char err_readtime[] __initdata =
- KERN_ERR "PM: can't read %s time, err %d\n";
- static char err_wakealarm [] __initdata =
- KERN_ERR "PM: can't set %s wakealarm, err %d\n";
- static char err_suspend[] __initdata =
- KERN_ERR "PM: suspend test failed, error %d\n";
- static char info_test[] __initdata =
- KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
-
- unsigned long now;
- struct rtc_wkalrm alm;
- int status;
-
- /* this may fail if the RTC hasn't been initialized */
- status = rtc_read_time(rtc, &alm.time);
- if (status < 0) {
- printk(err_readtime, dev_name(&rtc->dev), status);
- return;
- }
- rtc_tm_to_time(&alm.time, &now);
-
- memset(&alm, 0, sizeof alm);
- rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
- alm.enabled = true;
-
- status = rtc_set_alarm(rtc, &alm);
- if (status < 0) {
- printk(err_wakealarm, dev_name(&rtc->dev), status);
- return;
- }
-
- if (state == PM_SUSPEND_MEM) {
- printk(info_test, pm_states[state]);
- status = pm_suspend(state);
- if (status == -ENODEV)
- state = PM_SUSPEND_STANDBY;
- }
- if (state == PM_SUSPEND_STANDBY) {
- printk(info_test, pm_states[state]);
- status = pm_suspend(state);
- }
- if (status < 0)
- printk(err_suspend, status);
-
- /* Some platforms can't detect that the alarm triggered the
- * wakeup, or (accordingly) disable it after it afterwards.
- * It's supposed to give oneshot behavior; cope.
- */
- alm.enabled = false;
- rtc_set_alarm(rtc, &alm);
-}
-
-static int __init has_wakealarm(struct device *dev, void *name_ptr)
-{
- struct rtc_device *candidate = to_rtc_device(dev);
-
- if (!candidate->ops->set_alarm)
- return 0;
- if (!device_may_wakeup(candidate->dev.parent))
- return 0;
-
- *(const char **)name_ptr = dev_name(dev);
- return 1;
-}
-
-/*
- * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
- * at startup time. They're normally disabled, for faster boot and because
- * we can't know which states really work on this particular system.
- */
-static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
-
-static char warn_bad_state[] __initdata =
- KERN_WARNING "PM: can't test '%s' suspend state\n";
-
-static int __init setup_test_suspend(char *value)
-{
- unsigned i;
-
- /* "=mem" ==> "mem" */
- value++;
- for (i = 0; i < PM_SUSPEND_MAX; i++) {
- if (!pm_states[i])
- continue;
- if (strcmp(pm_states[i], value) != 0)
- continue;
- test_state = (__force suspend_state_t) i;
- return 0;
- }
- printk(warn_bad_state, value);
- return 0;
-}
-__setup("test_suspend", setup_test_suspend);
-
-static int __init test_suspend(void)
-{
- static char warn_no_rtc[] __initdata =
- KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
-
- char *pony = NULL;
- struct rtc_device *rtc = NULL;
-
- /* PM is initialized by now; is that state testable? */
- if (test_state == PM_SUSPEND_ON)
- goto done;
- if (!valid_state(test_state)) {
- printk(warn_bad_state, pm_states[test_state]);
- goto done;
- }
-
- /* RTCs have initialized by now too ... can we use one? */
- class_find_device(rtc_class, NULL, &pony, has_wakealarm);
- if (pony)
- rtc = rtc_class_open(pony);
- if (!rtc) {
- printk(warn_no_rtc);
- goto done;
- }
-
- /* go for it */
- test_wakealarm(rtc, test_state);
- rtc_class_close(rtc);
-done:
- return 0;
-}
-late_initcall(test_suspend);
-
-#endif /* CONFIG_PM_TEST_SUSPEND */
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 46b5ec7a3afb..26d5a26f82e3 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -45,7 +45,7 @@ static inline char *check_image_kernel(struct swsusp_info *info)
*/
#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
-/* kernel/power/disk.c */
+/* kernel/power/hibernate.c */
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
@@ -74,7 +74,7 @@ extern asmlinkage int swsusp_arch_resume(void);
extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
-extern unsigned int count_data_pages(void);
+extern int swsusp_shrink_memory(void);
/**
* Auxiliary structure used for reading the snapshot image data and
@@ -147,9 +147,8 @@ extern int swsusp_swap_in_use(void);
*/
#define SF_PLATFORM_MODE 1
-/* kernel/power/disk.c */
+/* kernel/power/hibernate.c */
extern int swsusp_check(void);
-extern int swsusp_shrink_memory(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
@@ -161,22 +160,36 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
unsigned int, char *);
#ifdef CONFIG_SUSPEND
-/* kernel/power/main.c */
+/* kernel/power/suspend.c */
+extern const char *const pm_states[];
+
+extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
+extern int enter_state(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
return -ENOSYS;
}
+static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
+static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */
+#ifdef CONFIG_PM_TEST_SUSPEND
+/* kernel/power/suspend_test.c */
+extern void suspend_test_start(void);
+extern void suspend_test_finish(const char *label);
+#else /* !CONFIG_PM_TEST_SUSPEND */
+static inline void suspend_test_start(void) {}
+static inline void suspend_test_finish(const char *label) {}
+#endif /* !CONFIG_PM_TEST_SUSPEND */
+
#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif
#ifdef CONFIG_HIGHMEM
-unsigned int count_highmem_pages(void);
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 97890831e1b5..e8b337006276 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -34,7 +34,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
.handler = handle_poweroff,
.help_msg = "powerOff",
.action_msg = "Power Off",
- .enable_mask = SYSRQ_ENABLE_BOOT,
+ .enable_mask = SYSRQ_ENABLE_BOOT,
};
static int pm_sysrq_init(void)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index ca634019497a..da2072d73811 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -117,9 +117,12 @@ int freeze_processes(void)
if (error)
goto Exit;
printk("done.");
+
+ oom_killer_disable();
Exit:
BUG_ON(in_atomic());
printk("\n");
+
return error;
}
@@ -145,6 +148,8 @@ static void thaw_tasks(bool nosig_only)
void thaw_processes(void)
{
+ oom_killer_enable();
+
printk("Restarting tasks ... ");
thaw_tasks(true);
thaw_tasks(false);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 33e2e4a819f9..523a451b45d3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -39,6 +39,14 @@ static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
+/*
+ * Preferred image size in bytes (tunable via /sys/power/image_size).
+ * When it is set to N, swsusp will do its best to ensure the image
+ * size will not exceed N bytes, but if that is impossible, it will
+ * try to create the smallest image possible.
+ */
+unsigned long image_size = 500 * 1024 * 1024;
+
/* List of PBEs needed for restoring the pages that were allocated before
* the suspend and included in the suspend image, but have also been
* allocated by the "resume" kernel, so their contents cannot be written
@@ -840,7 +848,7 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
* pages.
*/
-unsigned int count_highmem_pages(void)
+static unsigned int count_highmem_pages(void)
{
struct zone *zone;
unsigned int n = 0;
@@ -902,7 +910,7 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
* pages.
*/
-unsigned int count_data_pages(void)
+static unsigned int count_data_pages(void)
{
struct zone *zone;
unsigned long pfn, max_zone_pfn;
@@ -1058,6 +1066,74 @@ void swsusp_free(void)
buffer = NULL;
}
+/**
+ * swsusp_shrink_memory - Try to free as much memory as needed
+ *
+ * ... but do not OOM-kill anyone
+ *
+ * Notice: all userland should be stopped before it is called, or
+ * livelock is possible.
+ */
+
+#define SHRINK_BITE 10000
+static inline unsigned long __shrink_memory(long tmp)
+{
+ if (tmp > SHRINK_BITE)
+ tmp = SHRINK_BITE;
+ return shrink_all_memory(tmp);
+}
+
+int swsusp_shrink_memory(void)
+{
+ long tmp;
+ struct zone *zone;
+ unsigned long pages = 0;
+ unsigned int i = 0;
+ char *p = "-\\|/";
+ struct timeval start, stop;
+
+ printk(KERN_INFO "PM: Shrinking memory... ");
+ do_gettimeofday(&start);
+ do {
+ long size, highmem_size;
+
+ highmem_size = count_highmem_pages();
+ size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
+ tmp = size;
+ size += highmem_size;
+ for_each_populated_zone(zone) {
+ tmp += snapshot_additional_pages(zone);
+ if (is_highmem(zone)) {
+ highmem_size -=
+ zone_page_state(zone, NR_FREE_PAGES);
+ } else {
+ tmp -= zone_page_state(zone, NR_FREE_PAGES);
+ tmp += zone->lowmem_reserve[ZONE_NORMAL];
+ }
+ }
+
+ if (highmem_size < 0)
+ highmem_size = 0;
+
+ tmp += highmem_size;
+ if (tmp > 0) {
+ tmp = __shrink_memory(tmp);
+ if (!tmp)
+ return -ENOMEM;
+ pages += tmp;
+ } else if (size > image_size / PAGE_SIZE) {
+ tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
+ pages += tmp;
+ }
+ printk("\b%c", p[i++%4]);
+ } while (tmp > 0);
+ do_gettimeofday(&stop);
+ printk("\bdone (%lu pages freed)\n", pages);
+ swsusp_show_speed(&start, &stop, pages, "Freed");
+
+ return 0;
+}
+
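With the default image_size defined above and 4 KiB pages, the second branch of the loop keeps freeing memory while the estimated image still exceeds the threshold:

        image_size / PAGE_SIZE = (500 * 1024 * 1024) / 4096 = 128000 pages     /* ~500 MB */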
#ifdef CONFIG_HIGHMEM
/**
* count_pages_for_highmem - compute the number of non-highmem pages
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
new file mode 100644
index 000000000000..6f10dfc2d3e9
--- /dev/null
+++ b/kernel/power/suspend.c
@@ -0,0 +1,300 @@
+/*
+ * kernel/power/suspend.c - Suspend to RAM and standby functionality.
+ *
+ * Copyright (c) 2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/cpu.h>
+#include <linux/syscalls.h>
+
+#include "power.h"
+
+const char *const pm_states[PM_SUSPEND_MAX] = {
+ [PM_SUSPEND_STANDBY] = "standby",
+ [PM_SUSPEND_MEM] = "mem",
+};
+
+static struct platform_suspend_ops *suspend_ops;
+
+/**
+ * suspend_set_ops - Set the global suspend method table.
+ * @ops: Pointer to ops structure.
+ */
+void suspend_set_ops(struct platform_suspend_ops *ops)
+{
+ mutex_lock(&pm_mutex);
+ suspend_ops = ops;
+ mutex_unlock(&pm_mutex);
+}
+
+bool valid_state(suspend_state_t state)
+{
+ /*
+ * All states need low-level support and need to be valid to the low-level
+ * implementation; no valid callback implies that none are valid.
+ */
+ return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
+}
+
+/**
+ * suspend_valid_only_mem - generic memory-only valid callback
+ *
+ * Platform drivers that implement mem suspend only and only need
+ * to check for that in their .valid callback can use this instead
+ * of rolling their own .valid callback.
+ */
+int suspend_valid_only_mem(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM;
+}
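For reference, suspend_prepare() below refuses to run without a .enter callback and valid_state() requires .valid, so a minimal platform backend reduces to the sketch here; the function names and the init hook are illustrative, not from this patch:

        static int example_suspend_enter(suspend_state_t state)
        {
                /* put the platform to sleep; return after wakeup */
                return 0;
        }

        static struct platform_suspend_ops example_suspend_ops = {
                .valid  = suspend_valid_only_mem,       /* only "mem" supported */
                .enter  = example_suspend_enter,
        };

        static int __init example_pm_init(void)
        {
                suspend_set_ops(&example_suspend_ops);
                return 0;
        }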
+
+static int suspend_test(int level)
+{
+#ifdef CONFIG_PM_DEBUG
+ if (pm_test_level == level) {
+ printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
+ mdelay(5000);
+ return 1;
+ }
+#endif /* !CONFIG_PM_DEBUG */
+ return 0;
+}
+
+/**
+ * suspend_prepare - Do prep work before entering low-power state.
+ *
+ * This is common code that is called for each state that we're entering.
+ * Run suspend notifiers, allocate a console and stop all processes.
+ */
+static int suspend_prepare(void)
+{
+ int error;
+
+ if (!suspend_ops || !suspend_ops->enter)
+ return -EPERM;
+
+ pm_prepare_console();
+
+ error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
+ if (error)
+ goto Finish;
+
+ error = usermodehelper_disable();
+ if (error)
+ goto Finish;
+
+ error = suspend_freeze_processes();
+ if (!error)
+ return 0;
+
+ suspend_thaw_processes();
+ usermodehelper_enable();
+ Finish:
+ pm_notifier_call_chain(PM_POST_SUSPEND);
+ pm_restore_console();
+ return error;
+}
+
+/* default implementation */
+void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
+{
+ local_irq_disable();
+}
+
+/* default implementation */
+void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
+{
+ local_irq_enable();
+}
+
+/**
+ * suspend_enter - enter the desired system sleep state.
+ * @state: state to enter
+ *
+ * This function should be called after devices have been suspended.
+ */
+static int suspend_enter(suspend_state_t state)
+{
+ int error;
+
+ if (suspend_ops->prepare) {
+ error = suspend_ops->prepare();
+ if (error)
+ return error;
+ }
+
+ error = dpm_suspend_noirq(PMSG_SUSPEND);
+ if (error) {
+ printk(KERN_ERR "PM: Some devices failed to power down\n");
+ goto Platform_finish;
+ }
+
+ if (suspend_ops->prepare_late) {
+ error = suspend_ops->prepare_late();
+ if (error)
+ goto Power_up_devices;
+ }
+
+ if (suspend_test(TEST_PLATFORM))
+ goto Platform_wake;
+
+ error = disable_nonboot_cpus();
+ if (error || suspend_test(TEST_CPUS))
+ goto Enable_cpus;
+
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
+
+ error = sysdev_suspend(PMSG_SUSPEND);
+ if (!error) {
+ if (!suspend_test(TEST_CORE))
+ error = suspend_ops->enter(state);
+ sysdev_resume();
+ }
+
+ arch_suspend_enable_irqs();
+ BUG_ON(irqs_disabled());
+
+ Enable_cpus:
+ enable_nonboot_cpus();
+
+ Platform_wake:
+ if (suspend_ops->wake)
+ suspend_ops->wake();
+
+ Power_up_devices:
+ dpm_resume_noirq(PMSG_RESUME);
+
+ Platform_finish:
+ if (suspend_ops->finish)
+ suspend_ops->finish();
+
+ return error;
+}
+
+/**
+ * suspend_devices_and_enter - suspend devices and enter the desired system
+ * sleep state.
+ * @state: state to enter
+ */
+int suspend_devices_and_enter(suspend_state_t state)
+{
+ int error;
+
+ if (!suspend_ops)
+ return -ENOSYS;
+
+ if (suspend_ops->begin) {
+ error = suspend_ops->begin(state);
+ if (error)
+ goto Close;
+ }
+ suspend_console();
+ suspend_test_start();
+ error = dpm_suspend_start(PMSG_SUSPEND);
+ if (error) {
+ printk(KERN_ERR "PM: Some devices failed to suspend\n");
+ goto Recover_platform;
+ }
+ suspend_test_finish("suspend devices");
+ if (suspend_test(TEST_DEVICES))
+ goto Recover_platform;
+
+ suspend_enter(state);
+
+ Resume_devices:
+ suspend_test_start();
+ dpm_resume_end(PMSG_RESUME);
+ suspend_test_finish("resume devices");
+ resume_console();
+ Close:
+ if (suspend_ops->end)
+ suspend_ops->end();
+ return error;
+
+ Recover_platform:
+ if (suspend_ops->recover)
+ suspend_ops->recover();
+ goto Resume_devices;
+}
+
+/**
+ * suspend_finish - Do final work before exiting suspend sequence.
+ *
+ * Call platform code to clean up, restart processes, and free the
+ * console that we've allocated. This is not called for suspend-to-disk.
+ */
+static void suspend_finish(void)
+{
+ suspend_thaw_processes();
+ usermodehelper_enable();
+ pm_notifier_call_chain(PM_POST_SUSPEND);
+ pm_restore_console();
+}
+
+/**
+ * enter_state - Do common work of entering low-power state.
+ * @state: pm_state structure for state we're entering.
+ *
+ * Make sure we're the only ones trying to enter a sleep state. Fail
+ * if someone has beaten us to it, since we don't want anything weird to
+ * happen when we wake up.
+ * Then, do the setup for suspend, enter the state, and clean up (after
+ * we've woken up).
+ */
+int enter_state(suspend_state_t state)
+{
+ int error;
+
+ if (!valid_state(state))
+ return -ENODEV;
+
+ if (!mutex_trylock(&pm_mutex))
+ return -EBUSY;
+
+ printk(KERN_INFO "PM: Syncing filesystems ... ");
+ sys_sync();
+ printk("done.\n");
+
+ pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
+ error = suspend_prepare();
+ if (error)
+ goto Unlock;
+
+ if (suspend_test(TEST_FREEZER))
+ goto Finish;
+
+ pr_debug("PM: Entering %s sleep\n", pm_states[state]);
+ error = suspend_devices_and_enter(state);
+
+ Finish:
+ pr_debug("PM: Finishing wakeup.\n");
+ suspend_finish();
+ Unlock:
+ mutex_unlock(&pm_mutex);
+ return error;
+}
+
+/**
+ * pm_suspend - Externally visible function for suspending system.
+ * @state: Enumerated value of state to enter.
+ *
+ * Determine whether or not value is within range, get state
+ * structure, and enter (above).
+ */
+int pm_suspend(suspend_state_t state)
+{
+ if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
+ return enter_state(state);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(pm_suspend);
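pm_suspend() is the exported entry point for out-of-file callers, while the /sys/power/state handler reaches the same code through enter_state(). An illustrative caller (the wrapper function is an assumption, shown only for the calling convention):

        static int example_go_to_mem(void)
        {
                int error = pm_suspend(PM_SUSPEND_MEM);

                if (error)
                        printk(KERN_ERR "PM: suspend to RAM failed: %d\n", error);
                return error;
        }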
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
new file mode 100644
index 000000000000..17d8bb1acf9c
--- /dev/null
+++ b/kernel/power/suspend_test.c
@@ -0,0 +1,187 @@
+/*
+ * kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
+ *
+ * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/rtc.h>
+
+#include "power.h"
+
+/*
+ * We test the system suspend code by setting an RTC wakealarm a short
+ * time in the future, then suspending. Suspending the devices won't
+ * normally take long ... some systems only need a few milliseconds.
+ *
+ * The time it takes is system-specific though, so when we test this
+ * during system bootup we allow a LOT of time.
+ */
+#define TEST_SUSPEND_SECONDS 5
+
+static unsigned long suspend_test_start_time;
+
+void suspend_test_start(void)
+{
+ /* FIXME Use better timebase than "jiffies", ideally a clocksource.
+ * What we want is a hardware counter that will work correctly even
+ * during the irqs-are-off stages of the suspend/resume cycle...
+ */
+ suspend_test_start_time = jiffies;
+}
+
+void suspend_test_finish(const char *label)
+{
+ long nj = jiffies - suspend_test_start_time;
+ unsigned msec;
+
+ msec = jiffies_to_msecs(abs(nj));
+ pr_info("PM: %s took %d.%03d seconds\n", label,
+ msec / 1000, msec % 1000);
+
+ /* Warning on suspend means the RTC alarm period needs to be
+ * larger -- the system was sooo slooowwww to suspend that the
+ * alarm (should have) fired before the system went to sleep!
+ *
+ * Warning on either suspend or resume also means the system
+ * has some performance issues. The stack dump of a WARN_ON
+ * is more likely to get the right attention than a printk...
+ */
+ WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
+}
+
+/*
+ * To test system suspend, we need a hands-off mechanism to resume the
+ * system. RTCs wake alarms are a common self-contained mechanism.
+ */
+
+static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
+{
+ static char err_readtime[] __initdata =
+ KERN_ERR "PM: can't read %s time, err %d\n";
+ static char err_wakealarm [] __initdata =
+ KERN_ERR "PM: can't set %s wakealarm, err %d\n";
+ static char err_suspend[] __initdata =
+ KERN_ERR "PM: suspend test failed, error %d\n";
+ static char info_test[] __initdata =
+ KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
+
+ unsigned long now;
+ struct rtc_wkalrm alm;
+ int status;
+
+ /* this may fail if the RTC hasn't been initialized */
+ status = rtc_read_time(rtc, &alm.time);
+ if (status < 0) {
+ printk(err_readtime, dev_name(&rtc->dev), status);
+ return;
+ }
+ rtc_tm_to_time(&alm.time, &now);
+
+ memset(&alm, 0, sizeof alm);
+ rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
+ alm.enabled = true;
+
+ status = rtc_set_alarm(rtc, &alm);
+ if (status < 0) {
+ printk(err_wakealarm, dev_name(&rtc->dev), status);
+ return;
+ }
+
+ if (state == PM_SUSPEND_MEM) {
+ printk(info_test, pm_states[state]);
+ status = pm_suspend(state);
+ if (status == -ENODEV)
+ state = PM_SUSPEND_STANDBY;
+ }
+ if (state == PM_SUSPEND_STANDBY) {
+ printk(info_test, pm_states[state]);
+ status = pm_suspend(state);
+ }
+ if (status < 0)
+ printk(err_suspend, status);
+
+ /* Some platforms can't detect that the alarm triggered the
+ * wakeup, or (accordingly) disable it afterwards.
+ * It's supposed to give oneshot behavior; cope.
+ */
+ alm.enabled = false;
+ rtc_set_alarm(rtc, &alm);
+}
+
+static int __init has_wakealarm(struct device *dev, void *name_ptr)
+{
+ struct rtc_device *candidate = to_rtc_device(dev);
+
+ if (!candidate->ops->set_alarm)
+ return 0;
+ if (!device_may_wakeup(candidate->dev.parent))
+ return 0;
+
+ *(const char **)name_ptr = dev_name(dev);
+ return 1;
+}
+
+/*
+ * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
+ * at startup time. They're normally disabled, for faster boot and because
+ * we can't know which states really work on this particular system.
+ */
+static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
+
+static char warn_bad_state[] __initdata =
+ KERN_WARNING "PM: can't test '%s' suspend state\n";
+
+static int __init setup_test_suspend(char *value)
+{
+ unsigned i;
+
+ /* "=mem" ==> "mem" */
+ value++;
+ for (i = 0; i < PM_SUSPEND_MAX; i++) {
+ if (!pm_states[i])
+ continue;
+ if (strcmp(pm_states[i], value) != 0)
+ continue;
+ test_state = (__force suspend_state_t) i;
+ return 0;
+ }
+ printk(warn_bad_state, value);
+ return 0;
+}
+__setup("test_suspend", setup_test_suspend);
+
+static int __init test_suspend(void)
+{
+ static char warn_no_rtc[] __initdata =
+ KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
+
+ char *pony = NULL;
+ struct rtc_device *rtc = NULL;
+
+ /* PM is initialized by now; is that state testable? */
+ if (test_state == PM_SUSPEND_ON)
+ goto done;
+ if (!valid_state(test_state)) {
+ printk(warn_bad_state, pm_states[test_state]);
+ goto done;
+ }
+
+ /* RTCs have initialized by now too ... can we use one? */
+ class_find_device(rtc_class, NULL, &pony, has_wakealarm);
+ if (pony)
+ rtc = rtc_class_open(pony);
+ if (!rtc) {
+ printk(warn_no_rtc);
+ goto done;
+ }
+
+ /* go for it */
+ test_wakealarm(rtc, test_state);
+ rtc_class_close(rtc);
+done:
+ return 0;
+}
+late_initcall(test_suspend);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 78c35047586d..6a07f4dbf2f8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -55,14 +55,6 @@
#include "power.h"
-/*
- * Preferred image size in bytes (tunable via /sys/power/image_size).
- * When it is set to N, swsusp will do its best to ensure the image
- * size will not exceed N bytes, but if that is impossible, it will
- * try to create the smallest image possible.
- */
-unsigned long image_size = 500 * 1024 * 1024;
-
int in_suspend __nosavedata = 0;
/**
@@ -194,193 +186,3 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
centisecs / 100, centisecs % 100,
kps / 1000, (kps % 1000) / 10);
}
-
-/**
- * swsusp_shrink_memory - Try to free as much memory as needed
- *
- * ... but do not OOM-kill anyone
- *
- * Notice: all userland should be stopped before it is called, or
- * livelock is possible.
- */
-
-#define SHRINK_BITE 10000
-static inline unsigned long __shrink_memory(long tmp)
-{
- if (tmp > SHRINK_BITE)
- tmp = SHRINK_BITE;
- return shrink_all_memory(tmp);
-}
-
-int swsusp_shrink_memory(void)
-{
- long tmp;
- struct zone *zone;
- unsigned long pages = 0;
- unsigned int i = 0;
- char *p = "-\\|/";
- struct timeval start, stop;
-
- printk(KERN_INFO "PM: Shrinking memory... ");
- do_gettimeofday(&start);
- do {
- long size, highmem_size;
-
- highmem_size = count_highmem_pages();
- size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
- tmp = size;
- size += highmem_size;
- for_each_populated_zone(zone) {
- tmp += snapshot_additional_pages(zone);
- if (is_highmem(zone)) {
- highmem_size -=
- zone_page_state(zone, NR_FREE_PAGES);
- } else {
- tmp -= zone_page_state(zone, NR_FREE_PAGES);
- tmp += zone->lowmem_reserve[ZONE_NORMAL];
- }
- }
-
- if (highmem_size < 0)
- highmem_size = 0;
-
- tmp += highmem_size;
- if (tmp > 0) {
- tmp = __shrink_memory(tmp);
- if (!tmp)
- return -ENOMEM;
- pages += tmp;
- } else if (size > image_size / PAGE_SIZE) {
- tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
- pages += tmp;
- }
- printk("\b%c", p[i++%4]);
- } while (tmp > 0);
- do_gettimeofday(&stop);
- printk("\bdone (%lu pages freed)\n", pages);
- swsusp_show_speed(&start, &stop, pages, "Freed");
-
- return 0;
-}
-
-/*
- * Platforms, like ACPI, may want us to save some memory used by them during
- * hibernation and to restore the contents of this memory during the subsequent
- * resume. The code below implements a mechanism allowing us to do that.
- */
-
-struct nvs_page {
- unsigned long phys_start;
- unsigned int size;
- void *kaddr;
- void *data;
- struct list_head node;
-};
-
-static LIST_HEAD(nvs_list);
-
-/**
- * hibernate_nvs_register - register platform NVS memory region to save
- * @start - physical address of the region
- * @size - size of the region
- *
- * The NVS region need not be page-aligned (both ends) and we arrange
- * things so that the data from page-aligned addresses in this region will
- * be copied into separate RAM pages.
- */
-int hibernate_nvs_register(unsigned long start, unsigned long size)
-{
- struct nvs_page *entry, *next;
-
- while (size > 0) {
- unsigned int nr_bytes;
-
- entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
- if (!entry)
- goto Error;
-
- list_add_tail(&entry->node, &nvs_list);
- entry->phys_start = start;
- nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
- entry->size = (size < nr_bytes) ? size : nr_bytes;
-
- start += entry->size;
- size -= entry->size;
- }
- return 0;
-
- Error:
- list_for_each_entry_safe(entry, next, &nvs_list, node) {
- list_del(&entry->node);
- kfree(entry);
- }
- return -ENOMEM;
-}
-
-/**
- * hibernate_nvs_free - free data pages allocated for saving NVS regions
- */
-void hibernate_nvs_free(void)
-{
- struct nvs_page *entry;
-
- list_for_each_entry(entry, &nvs_list, node)
- if (entry->data) {
- free_page((unsigned long)entry->data);
- entry->data = NULL;
- if (entry->kaddr) {
- iounmap(entry->kaddr);
- entry->kaddr = NULL;
- }
- }
-}
-
-/**
- * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
- */
-int hibernate_nvs_alloc(void)
-{
- struct nvs_page *entry;
-
- list_for_each_entry(entry, &nvs_list, node) {
- entry->data = (void *)__get_free_page(GFP_KERNEL);
- if (!entry->data) {
- hibernate_nvs_free();
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-/**
- * hibernate_nvs_save - save NVS memory regions
- */
-void hibernate_nvs_save(void)
-{
- struct nvs_page *entry;
-
- printk(KERN_INFO "PM: Saving platform NVS memory\n");
-
- list_for_each_entry(entry, &nvs_list, node)
- if (entry->data) {
- entry->kaddr = ioremap(entry->phys_start, entry->size);
- memcpy(entry->data, entry->kaddr, entry->size);
- }
-}
-
-/**
- * hibernate_nvs_restore - restore NVS memory regions
- *
- * This function is going to be called with interrupts disabled, so it
- * cannot iounmap the virtual addresses used to access the NVS region.
- */
-void hibernate_nvs_restore(void)
-{
- struct nvs_page *entry;
-
- printk(KERN_INFO "PM: Restoring platform NVS memory\n");
-
- list_for_each_entry(entry, &nvs_list, node)
- if (entry->data)
- memcpy(entry->kaddr, entry->data, entry->size);
-}
diff --git a/kernel/printk.c b/kernel/printk.c
index 5052b5497c67..b4d97b54c1ec 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -687,20 +687,35 @@ asmlinkage int vprintk(const char *fmt, va_list args)
sizeof(printk_buf) - printed_len, fmt, args);
+ p = printk_buf;
+
+ /* Do we have a loglevel in the string? */
+ if (p[0] == '<') {
+ unsigned char c = p[1];
+ if (c && p[2] == '>') {
+ switch (c) {
+ case '0' ... '7': /* loglevel */
+ current_log_level = c - '0';
+ /* Fallthrough - make sure we're on a new line */
+ case 'd': /* KERN_DEFAULT */
+ if (!new_text_line) {
+ emit_log_char('\n');
+ new_text_line = 1;
+ }
+ /* Fallthrough - skip the loglevel */
+ case 'c': /* KERN_CONT */
+ p += 3;
+ break;
+ }
+ }
+ }
+
/*
* Copy the output into log_buf. If the caller didn't provide
* appropriate log level tags, we insert them here
*/
- for (p = printk_buf; *p; p++) {
+ for ( ; *p; p++) {
if (new_text_line) {
- /* If a token, set current_log_level and skip over */
- if (p[0] == '<' && p[1] >= '0' && p[1] <= '7' &&
- p[2] == '>') {
- current_log_level = p[1] - '0';
- p += 3;
- printed_len -= 3;
- }
-
/* Always output the token */
emit_log_char('<');
emit_log_char(current_log_level + '0');
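After this change the prefix is parsed once, up front, and three forms are recognized: "<0>".."<7>" select a log level and start a new record if needed, "<d>" (KERN_DEFAULT) keeps the default level but still forces a new record, and "<c>" (KERN_CONT) marks a continuation that must not start a new line. A typical caller-side combination, for illustration:

        printk(KERN_WARNING "something odd happened\n");  /* "<4>..."                     */
        printk(KERN_INFO "probing device");               /* "<6>...", no newline yet     */
        printk(KERN_CONT " ... done\n");                  /* "<c>" glues onto the line above */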
diff --git a/kernel/profile.c b/kernel/profile.c
index 7724e0409bae..69911b5745eb 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -111,12 +111,6 @@ int __ref profile_init(void)
/* only text is profiled */
prof_len = (_etext - _stext) >> prof_shift;
buffer_bytes = prof_len*sizeof(atomic_t);
- if (!slab_is_available()) {
- prof_buffer = alloc_bootmem(buffer_bytes);
- alloc_bootmem_cpumask_var(&prof_cpu_mask);
- cpumask_copy(prof_cpu_mask, cpu_possible_mask);
- return 0;
- }
if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
return -ENOMEM;
@@ -371,7 +365,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
node = cpu_to_node(cpu);
per_cpu(cpu_profile_flip, cpu) = 0;
if (!per_cpu(cpu_profile_hits, cpu)[1]) {
- page = alloc_pages_node(node,
+ page = alloc_pages_exact_node(node,
GFP_KERNEL | __GFP_ZERO,
0);
if (!page)
@@ -379,7 +373,7 @@ static int __cpuinit profile_cpu_callback(struct notifier_block *info,
per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
}
if (!per_cpu(cpu_profile_hits, cpu)[0]) {
- page = alloc_pages_node(node,
+ page = alloc_pages_exact_node(node,
GFP_KERNEL | __GFP_ZERO,
0);
if (!page)
@@ -570,14 +564,14 @@ static int create_hash_tables(void)
int node = cpu_to_node(cpu);
struct page *page;
- page = alloc_pages_node(node,
+ page = alloc_pages_exact_node(node,
GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
0);
if (!page)
goto out_cleanup;
per_cpu(cpu_profile_hits, cpu)[1]
= (struct profile_hit *)page_address(page);
- page = alloc_pages_node(node,
+ page = alloc_pages_exact_node(node,
GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
0);
if (!page)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index e950805f8630..f6d8b8cb5e34 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -175,10 +175,11 @@ int ptrace_attach(struct task_struct *task)
if (same_thread_group(task, current))
goto out;
- /* Protect exec's credential calculations against our interference;
- * SUID, SGID and LSM creds get determined differently under ptrace.
+ /* Protect the target's credential calculations against our
+ * interference; SUID, SGID and LSM creds get determined differently
+ * under ptrace.
*/
- retval = mutex_lock_interruptible(&task->cred_exec_mutex);
+ retval = mutex_lock_interruptible(&task->cred_guard_mutex);
if (retval < 0)
goto out;
@@ -222,7 +223,7 @@ repeat:
bad:
write_unlock_irqrestore(&tasklist_lock, flags);
task_unlock(task);
- mutex_unlock(&task->cred_exec_mutex);
+ mutex_unlock(&task->cred_guard_mutex);
out:
return retval;
}
@@ -294,6 +295,8 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
if (child->ptrace) {
child->exit_code = data;
dead = __ptrace_detach(current, child);
+ if (!child->exit_state)
+ wake_up_process(child);
}
write_unlock_irq(&tasklist_lock);
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index ce97a4df64d3..beb0e659adcc 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1356,17 +1356,11 @@ static int rcu_sched_grace_period(void *arg)
rcu_ctrlblk.sched_sleep = rcu_sched_sleeping;
spin_unlock_irqrestore(&rcu_ctrlblk.schedlock, flags);
- ret = 0;
+ ret = 0; /* unused */
__wait_event_interruptible(rcu_ctrlblk.sched_wq,
rcu_ctrlblk.sched_sleep != rcu_sched_sleeping,
ret);
- /*
- * Signals would prevent us from sleeping, and we cannot
- * do much with them in any case. So flush them.
- */
- if (ret)
- flush_signals(current);
couldsleepnext = 0;
} while (!kthread_should_stop());
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d2a372fb0b9b..0dccfbba6d26 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1259,31 +1259,44 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
check_cpu_stall(rsp, rdp);
/* Is the RCU core waiting for a quiescent state from this CPU? */
- if (rdp->qs_pending)
+ if (rdp->qs_pending) {
+ rdp->n_rp_qs_pending++;
return 1;
+ }
/* Does this CPU have callbacks ready to invoke? */
- if (cpu_has_callbacks_ready_to_invoke(rdp))
+ if (cpu_has_callbacks_ready_to_invoke(rdp)) {
+ rdp->n_rp_cb_ready++;
return 1;
+ }
/* Has RCU gone idle with this CPU needing another grace period? */
- if (cpu_needs_another_gp(rsp, rdp))
+ if (cpu_needs_another_gp(rsp, rdp)) {
+ rdp->n_rp_cpu_needs_gp++;
return 1;
+ }
/* Has another RCU grace period completed? */
- if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */
+ if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */
+ rdp->n_rp_gp_completed++;
return 1;
+ }
/* Has a new RCU grace period started? */
- if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */
+ if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */
+ rdp->n_rp_gp_started++;
return 1;
+ }
/* Has an RCU GP gone long enough to send resched IPIs &c? */
if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
- ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
+ ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
+ rdp->n_rp_need_fqs++;
return 1;
+ }
/* nothing to do */
+ rdp->n_rp_need_nothing++;
return 0;
}
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4b1875ba9404..fe1dcdbf1ca3 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -213,7 +213,63 @@ static struct file_operations rcugp_fops = {
.release = single_release,
};
-static struct dentry *rcudir, *datadir, *datadir_csv, *hierdir, *gpdir;
+static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
+{
+ seq_printf(m, "%3d%cnp=%ld "
+ "qsp=%ld cbr=%ld cng=%ld gpc=%ld gps=%ld nf=%ld nn=%ld\n",
+ rdp->cpu,
+ cpu_is_offline(rdp->cpu) ? '!' : ' ',
+ rdp->n_rcu_pending,
+ rdp->n_rp_qs_pending,
+ rdp->n_rp_cb_ready,
+ rdp->n_rp_cpu_needs_gp,
+ rdp->n_rp_gp_completed,
+ rdp->n_rp_gp_started,
+ rdp->n_rp_need_fqs,
+ rdp->n_rp_need_nothing);
+}
+
+static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
+{
+ int cpu;
+ struct rcu_data *rdp;
+
+ for_each_possible_cpu(cpu) {
+ rdp = rsp->rda[cpu];
+ if (rdp->beenonline)
+ print_one_rcu_pending(m, rdp);
+ }
+}
+
+static int show_rcu_pending(struct seq_file *m, void *unused)
+{
+ seq_puts(m, "rcu:\n");
+ print_rcu_pendings(m, &rcu_state);
+ seq_puts(m, "rcu_bh:\n");
+ print_rcu_pendings(m, &rcu_bh_state);
+ return 0;
+}
+
+static int rcu_pending_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcu_pending, NULL);
+}
+
+static struct file_operations rcu_pending_fops = {
+ .owner = THIS_MODULE,
+ .open = rcu_pending_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *rcudir;
+static struct dentry *datadir;
+static struct dentry *datadir_csv;
+static struct dentry *gpdir;
+static struct dentry *hierdir;
+static struct dentry *rcu_pendingdir;
+
static int __init rcuclassic_trace_init(void)
{
rcudir = debugfs_create_dir("rcu", NULL);
@@ -238,6 +294,11 @@ static int __init rcuclassic_trace_init(void)
NULL, &rcuhier_fops);
if (!hierdir)
goto free_out;
+
+ rcu_pendingdir = debugfs_create_file("rcu_pending", 0444, rcudir,
+ NULL, &rcu_pending_fops);
+ if (!rcu_pendingdir)
+ goto free_out;
return 0;
free_out:
if (datadir)
@@ -257,6 +318,7 @@ static void __exit rcuclassic_trace_cleanup(void)
debugfs_remove(datadir_csv);
debugfs_remove(gpdir);
debugfs_remove(hierdir);
+ debugfs_remove(rcu_pendingdir);
debugfs_remove(rcudir);
}
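Reading the new file (typically /sys/kernel/debug/rcu/rcu_pending, assuming debugfs is mounted in the usual place) yields one line per CPU that has been online; the abbreviated columns map onto the counters bumped in __rcu_pending() above:

        np  -> n_rcu_pending        (calls into __rcu_pending)
        qsp -> n_rp_qs_pending      (core waiting for a quiescent state from this CPU)
        cbr -> n_rp_cb_ready        (callbacks ready to invoke)
        cng -> n_rp_cpu_needs_gp    (CPU needs another grace period)
        gpc -> n_rp_gp_completed    (another grace period completed)
        gps -> n_rp_gp_started      (new grace period started)
        nf  -> n_rp_need_fqs        (grace period ran long enough to need forcing)
        nn  -> n_rp_need_nothing    (nothing to do)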
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 69d9cb921ffa..fcd107a78c5a 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -300,7 +300,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
* assigned pending owner [which might not have taken the
* lock yet]:
*/
-static inline int try_to_steal_lock(struct rt_mutex *lock)
+static inline int try_to_steal_lock(struct rt_mutex *lock,
+ struct task_struct *task)
{
struct task_struct *pendowner = rt_mutex_owner(lock);
struct rt_mutex_waiter *next;
@@ -309,11 +310,11 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
if (!rt_mutex_owner_pending(lock))
return 0;
- if (pendowner == current)
+ if (pendowner == task)
return 1;
spin_lock_irqsave(&pendowner->pi_lock, flags);
- if (current->prio >= pendowner->prio) {
+ if (task->prio >= pendowner->prio) {
spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 0;
}
@@ -338,21 +339,21 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
* We are going to steal the lock and a waiter was
* enqueued on the pending owners pi_waiters queue. So
* we have to enqueue this waiter into
- * current->pi_waiters list. This covers the case,
- * where current is boosted because it holds another
+ * task->pi_waiters list. This covers the case,
+ * where task is boosted because it holds another
* lock and gets unboosted because the booster is
* interrupted, so we would delay a waiter with higher
- * priority as current->normal_prio.
+ * priority as task->normal_prio.
*
* Note: in the rare case of a SCHED_OTHER task changing
* its priority and thus stealing the lock, next->task
- * might be current:
+ * might be task:
*/
- if (likely(next->task != current)) {
- spin_lock_irqsave(&current->pi_lock, flags);
- plist_add(&next->pi_list_entry, &current->pi_waiters);
- __rt_mutex_adjust_prio(current);
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ if (likely(next->task != task)) {
+ spin_lock_irqsave(&task->pi_lock, flags);
+ plist_add(&next->pi_list_entry, &task->pi_waiters);
+ __rt_mutex_adjust_prio(task);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
}
return 1;
}
@@ -389,7 +390,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
*/
mark_rt_mutex_waiters(lock);
- if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
+ if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
return 0;
/* We got the lock. */
@@ -411,6 +412,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
*/
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
+ struct task_struct *task,
int detect_deadlock)
{
struct task_struct *owner = rt_mutex_owner(lock);
@@ -418,21 +420,21 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0, res;
- spin_lock_irqsave(&current->pi_lock, flags);
- __rt_mutex_adjust_prio(current);
- waiter->task = current;
+ spin_lock_irqsave(&task->pi_lock, flags);
+ __rt_mutex_adjust_prio(task);
+ waiter->task = task;
waiter->lock = lock;
- plist_node_init(&waiter->list_entry, current->prio);
- plist_node_init(&waiter->pi_list_entry, current->prio);
+ plist_node_init(&waiter->list_entry, task->prio);
+ plist_node_init(&waiter->pi_list_entry, task->prio);
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
top_waiter = rt_mutex_top_waiter(lock);
plist_add(&waiter->list_entry, &lock->wait_list);
- current->pi_blocked_on = waiter;
+ task->pi_blocked_on = waiter;
- spin_unlock_irqrestore(&current->pi_lock, flags);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
spin_lock_irqsave(&owner->pi_lock, flags);
@@ -460,7 +462,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
spin_unlock(&lock->wait_lock);
res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
- current);
+ task);
spin_lock(&lock->wait_lock);
@@ -605,37 +607,25 @@ void rt_mutex_adjust_pi(struct task_struct *task)
rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * @lock: the rt_mutex to take
+ * @state: the state the task should block in (TASK_INTERRUPTIBLE
+ * or TASK_UNINTERRUPTIBLE)
+ * @timeout: the pre-initialized and started timer, or NULL for none
+ * @waiter: the pre-initialized rt_mutex_waiter
+ * @detect_deadlock: passed to task_blocks_on_rt_mutex
+ *
+ * lock->wait_lock must be held by the caller.
*/
static int __sched
-rt_mutex_slowlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- int detect_deadlock)
+__rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ struct rt_mutex_waiter *waiter,
+ int detect_deadlock)
{
- struct rt_mutex_waiter waiter;
int ret = 0;
- debug_rt_mutex_init_waiter(&waiter);
- waiter.task = NULL;
-
- spin_lock(&lock->wait_lock);
-
- /* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock)) {
- spin_unlock(&lock->wait_lock);
- return 0;
- }
-
- set_current_state(state);
-
- /* Setup the timer, when timeout != NULL */
- if (unlikely(timeout)) {
- hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
- if (!hrtimer_active(&timeout->timer))
- timeout->task = NULL;
- }
-
for (;;) {
/* Try to acquire the lock: */
if (try_to_take_rt_mutex(lock))
@@ -656,19 +646,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
}
/*
- * waiter.task is NULL the first time we come here and
+ * waiter->task is NULL the first time we come here and
* when we have been woken up by the previous owner
* but the lock got stolen by a higher prio task.
*/
- if (!waiter.task) {
- ret = task_blocks_on_rt_mutex(lock, &waiter,
+ if (!waiter->task) {
+ ret = task_blocks_on_rt_mutex(lock, waiter, current,
detect_deadlock);
/*
* If we got woken up by the owner then start loop
* all over without going into schedule to try
* to get the lock now:
*/
- if (unlikely(!waiter.task)) {
+ if (unlikely(!waiter->task)) {
/*
* Reset the return value. We might
* have returned with -EDEADLK and the
@@ -684,15 +674,52 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
spin_unlock(&lock->wait_lock);
- debug_rt_mutex_print_deadlock(&waiter);
+ debug_rt_mutex_print_deadlock(waiter);
- if (waiter.task)
+ if (waiter->task)
schedule_rt_mutex(lock);
spin_lock(&lock->wait_lock);
set_current_state(state);
}
+ return ret;
+}
+
+/*
+ * Slow path lock function:
+ */
+static int __sched
+rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock)
+{
+ struct rt_mutex_waiter waiter;
+ int ret = 0;
+
+ debug_rt_mutex_init_waiter(&waiter);
+ waiter.task = NULL;
+
+ spin_lock(&lock->wait_lock);
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock)) {
+ spin_unlock(&lock->wait_lock);
+ return 0;
+ }
+
+ set_current_state(state);
+
+ /* Setup the timer, when timeout != NULL */
+ if (unlikely(timeout)) {
+ hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+ if (!hrtimer_active(&timeout->timer))
+ timeout->task = NULL;
+ }
+
+ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
+ detect_deadlock);
+
set_current_state(TASK_RUNNING);
if (unlikely(waiter.task))
@@ -864,9 +891,9 @@ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
- * rt_mutex_lock_interruptible_ktime - lock a rt_mutex interruptible
- * the timeout structure is provided
- * by the caller
+ * rt_mutex_timed_lock - lock a rt_mutex interruptibly; the timeout
+ * structure is provided by the caller
*
* @lock: the rt_mutex to be locked
* @timeout: timeout structure or NULL (no timeout)
@@ -875,7 +902,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
- * -ETIMEOUT when the timeout expired
+ * -ETIMEDOUT when the timeout expired
* -EDEADLK when the lock would deadlock (when deadlock detection is on)
*/
int
@@ -913,7 +940,7 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
-/***
+/**
* rt_mutex_destroy - mark a mutex unusable
* @lock: the mutex to be destroyed
*
@@ -986,6 +1013,59 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
}
/**
+ * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock: the rt_mutex to take
+ * @waiter: the pre-initialized rt_mutex_waiter
+ * @task: the task to prepare
+ * @detect_deadlock: perform deadlock detection (1) or not (0)
+ *
+ * Returns:
+ * 0 - task blocked on lock
+ * 1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for FUTEX_REQUEUE_PI support.
+ */
+int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task, int detect_deadlock)
+{
+ int ret;
+
+ spin_lock(&lock->wait_lock);
+
+ mark_rt_mutex_waiters(lock);
+
+ if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
+ /* We got the lock for task. */
+ debug_rt_mutex_lock(lock);
+
+ rt_mutex_set_owner(lock, task, 0);
+
+ rt_mutex_deadlock_account_lock(lock, task);
+ return 1;
+ }
+
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+
+ if (ret && !waiter->task) {
+ /*
+ * Reset the return value. We might have
+ * returned with -EDEADLK and the owner
+ * released the lock while we were walking the
+ * pi chain. Let the waiter sort it out.
+ */
+ ret = 0;
+ }
+ spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(waiter);
+
+ return ret;
+}
+
+/**
* rt_mutex_next_owner - return the next owner of the lock
*
* @lock: the rt lock query
@@ -1004,3 +1084,57 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
return rt_mutex_top_waiter(lock)->task;
}
+
+/**
+ * rt_mutex_finish_proxy_lock() - Complete lock acquisition
+ * @lock: the rt_mutex we were woken on
+ * @to: the timeout, NULL if none. The hrtimer should already have
+ * been started.
+ * @waiter: the pre-initialized rt_mutex_waiter
+ * @detect_deadlock: perform deadlock detection (1) or not (0)
+ *
+ * Complete the lock acquisition started on our behalf by another thread.
+ *
+ * Returns:
+ * 0 - success
+ * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
+ *
+ * Special API call for PI-futex requeue support
+ */
+int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *to,
+ struct rt_mutex_waiter *waiter,
+ int detect_deadlock)
+{
+ int ret;
+
+ spin_lock(&lock->wait_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
+ detect_deadlock);
+
+ set_current_state(TASK_RUNNING);
+
+ if (unlikely(waiter->task))
+ remove_waiter(lock, waiter);
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
+
+ spin_unlock(&lock->wait_lock);
+
+ /*
+ * Readjust priority, when we did not get the lock. We might have been
+ * the pending owner and boosted. Since we did not take the lock, the
+ * PI boost has to go.
+ */
+ if (unlikely(ret))
+ rt_mutex_adjust_prio(current);
+
+ return ret;
+}
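A minimal sketch of how a requeueing caller (the intended user is the futex requeue-PI code) might drive the two new proxy-lock calls. The helper names requeue_one_waiter() and complete_requeued_lock() are invented for the example, and the rt_mutex_waiter is assumed to have been initialized (waiter->task = NULL) in the sleeping task's own context beforehand:

/* Illustrative only: start the acquisition on behalf of a sleeping task. */
static int requeue_one_waiter(struct rt_mutex *pi_mutex,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *waking_task)
{
	int ret;

	/* Enqueue waking_task on pi_mutex without it being current. */
	ret = rt_mutex_start_proxy_lock(pi_mutex, waiter, waking_task, 0);
	if (ret == 1) {
		/* The lock was acquired for the task: just wake it up. */
		wake_up_process(waking_task);
		return 0;
	}
	return ret;	/* 0: task blocked on the lock, <0: error */
}

/* Illustrative only: later, in the woken task's context, finish the job. */
static int complete_requeued_lock(struct rt_mutex *pi_mutex,
				  struct rt_mutex_waiter *waiter)
{
	return rt_mutex_finish_proxy_lock(pi_mutex, NULL, waiter, 0);
}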
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index e124bf5800ea..97a2f81866af 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -120,6 +120,14 @@ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
+extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task,
+ int detect_deadlock);
+extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *to,
+ struct rt_mutex_waiter *waiter,
+ int detect_deadlock);
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
diff --git a/kernel/sched.c b/kernel/sched.c
index 6530a27052f3..8fb88a906aaa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,6 +39,7 @@
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
+#include <linux/perf_counter.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
@@ -68,7 +69,6 @@
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
-#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
@@ -240,7 +240,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
hard = hrtimer_get_expires(&rt_b->rt_period_timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
- HRTIMER_MODE_ABS, 0);
+ HRTIMER_MODE_ABS_PINNED, 0);
}
spin_unlock(&rt_b->rt_runtime_lock);
}
@@ -580,6 +580,7 @@ struct rq {
struct load_weight load;
unsigned long nr_load_updates;
u64 nr_switches;
+ u64 nr_migrations_in;
struct cfs_rq cfs;
struct rt_rq rt;
@@ -626,6 +627,10 @@ struct rq {
struct list_head migration_queue;
#endif
+ /* calc_load related fields */
+ unsigned long calc_load_update;
+ long calc_load_active;
+
#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
int hrtick_csd_pending;
@@ -688,7 +693,7 @@ static inline int cpu_of(struct rq *rq)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
{
rq->clock = sched_clock_cpu(cpu_of(rq));
}
@@ -1150,7 +1155,7 @@ static __init void init_hrtick(void)
static void hrtick_start(struct rq *rq, u64 delay)
{
__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
- HRTIMER_MODE_REL, 0);
+ HRTIMER_MODE_REL_PINNED, 0);
}
static inline void init_hrtick(void)
@@ -1724,6 +1729,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
}
#endif
+static void calc_load_account_active(struct rq *this_rq);
+
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
@@ -1963,12 +1970,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p->se.sleep_start -= clock_offset;
if (p->se.block_start)
p->se.block_start -= clock_offset;
+#endif
if (old_cpu != new_cpu) {
- schedstat_inc(p, se.nr_migrations);
+ p->se.nr_migrations++;
+ new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
if (task_hot(p, old_rq->clock, NULL))
schedstat_inc(p, se.nr_forced2_migrations);
- }
#endif
+ perf_counter_task_migration(p, new_cpu);
+ }
p->se.vruntime -= old_cfsrq->min_vruntime -
new_cfsrq->min_vruntime;
@@ -2181,6 +2192,7 @@ void kick_process(struct task_struct *p)
smp_send_reschedule(cpu);
preempt_enable();
}
+EXPORT_SYMBOL_GPL(kick_process);
/*
* Return a low guess at the load of a migration-source cpu weighted
@@ -2363,6 +2375,27 @@ static int sched_balance_self(int cpu, int flag)
#endif /* CONFIG_SMP */
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p: the task to evaluate
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, in which case the function is called directly.
+ */
+void task_oncpu_function_call(struct task_struct *p,
+ void (*func) (void *info), void *info)
+{
+ int cpu;
+
+ preempt_disable();
+ cpu = task_cpu(p);
+ if (task_curr(p))
+ smp_call_function_single(cpu, func, info, 1);
+ preempt_enable();
+}
+
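As a hedged illustration of the helper above (the in-tree user is the perf counter code), the sketch below runs a callback on whichever CPU a task is currently executing on; struct poke_info and poke_task() are made-up names:

struct poke_info {
	struct task_struct *task;
	int seen_on_cpu;
};

/* Runs in IPI context on the CPU where info->task is running. */
static void poke_task(void *data)
{
	struct poke_info *info = data;

	if (info->task == current)
		info->seen_on_cpu = smp_processor_id();
}

static void poke(struct task_struct *p, struct poke_info *info)
{
	info->task = p;
	info->seen_on_cpu = -1;
	task_oncpu_function_call(p, poke_task, info);
}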
/***
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
@@ -2497,6 +2530,17 @@ out:
return success;
}
+/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes. Returns 1 if the process was woken up, 0 if it was already
+ * running.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
int wake_up_process(struct task_struct *p)
{
return try_to_wake_up(p, TASK_ALL, 0);
@@ -2519,6 +2563,7 @@ static void __sched_fork(struct task_struct *p)
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
+ p->se.nr_migrations = 0;
p->se.last_wakeup = 0;
p->se.avg_overlap = 0;
p->se.start_runtime = 0;
@@ -2749,6 +2794,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
*/
prev_state = prev->state;
finish_arch_switch(prev);
+ perf_counter_task_sched_in(current, cpu_of(rq));
finish_lock_switch(rq, prev);
#ifdef CONFIG_SMP
if (post_schedule)
@@ -2805,7 +2851,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
* combine the page table reload and the switch backend into
* one hypercall.
*/
- arch_enter_lazy_cpu_mode();
+ arch_start_context_switch(prev);
if (unlikely(!mm)) {
next->active_mm = oldmm;
@@ -2895,19 +2941,81 @@ unsigned long nr_iowait(void)
return sum;
}
-unsigned long nr_active(void)
+/* Variables and functions for calc_load */
+static atomic_long_t calc_load_tasks;
+static unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun);
+
+/**
+ * get_avenrun - get the load average array
+ * @loads: pointer to dest load array
+ * @offset: offset to add
+ * @shift: shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
- unsigned long i, running = 0, uninterruptible = 0;
+ loads[0] = (avenrun[0] + offset) << shift;
+ loads[1] = (avenrun[1] + offset) << shift;
+ loads[2] = (avenrun[2] + offset) << shift;
+}
- for_each_online_cpu(i) {
- running += cpu_rq(i)->nr_running;
- uninterruptible += cpu_rq(i)->nr_uninterruptible;
- }
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+ load *= exp;
+ load += active * (FIXED_1 - exp);
+ return load >> FSHIFT;
+}
+
+/*
+ * calc_global_load - update the avenrun load estimates 10 ticks after the
+ * CPUs have updated calc_load_tasks.
+ */
+void calc_global_load(void)
+{
+ unsigned long upd = calc_load_update + 10;
+ long active;
+
+ if (time_before(jiffies, upd))
+ return;
+
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;
- if (unlikely((long)uninterruptible < 0))
- uninterruptible = 0;
+ avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+ avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+ avenrun[2] = calc_load(avenrun[2], EXP_15, active);
- return running + uninterruptible;
+ calc_load_update += LOAD_FREQ;
+}
+
+/*
+ * Either called from update_cpu_load() or from a cpu going idle
+ */
+static void calc_load_account_active(struct rq *this_rq)
+{
+ long nr_active, delta;
+
+ nr_active = this_rq->nr_running;
+ nr_active += (long) this_rq->nr_uninterruptible;
+
+ if (nr_active != this_rq->calc_load_active) {
+ delta = nr_active - this_rq->calc_load_active;
+ this_rq->calc_load_active = nr_active;
+ atomic_long_add(delta, &calc_load_tasks);
+ }
+}
+
+/*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+ return cpu_rq(cpu)->nr_migrations_in;
}
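The avenrun[] entries are fixed-point values (FIXED_1 == 1 << FSHIFT) maintained by the exponential decay above, load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT with exp one of EXP_1/EXP_5/EXP_15. A minimal sketch of turning them into the familiar decimal load averages, assuming the LOAD_INT()/LOAD_FRAC() helpers from <linux/sched.h> and mirroring what a /proc/loadavg-style reader is expected to do:

static void show_loadavg_example(void)
{
	unsigned long avnrun[3];

	/* FIXED_1/200 rounds to the nearest 1/100, as /proc/loadavg does. */
	get_avenrun(avnrun, FIXED_1/200, 0);

	printk(KERN_INFO "load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
	       LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
	       LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
	       LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]));
}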
/*
@@ -2938,6 +3046,11 @@ static void update_cpu_load(struct rq *this_rq)
new_load += scale-1;
this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
}
+
+ if (time_after_eq(jiffies, this_rq->calc_load_update)) {
+ this_rq->calc_load_update += LOAD_FREQ;
+ calc_load_account_active(this_rq);
+ }
}
#ifdef CONFIG_SMP
@@ -4279,10 +4392,131 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
static struct {
atomic_t load_balancer;
cpumask_var_t cpu_mask;
+ cpumask_var_t ilb_grp_nohz_mask;
} nohz ____cacheline_aligned = {
.load_balancer = ATOMIC_INIT(-1),
};
+int get_nohz_load_balancer(void)
+{
+ return atomic_read(&nohz.load_balancer);
+}
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * lowest_flag_domain - Return lowest sched_domain containing flag.
+ * @cpu: The cpu whose lowest level of sched domain is to
+ * be returned.
+ * @flag: The flag to check for the lowest sched_domain
+ * for the given cpu.
+ *
+ * Returns the lowest sched_domain of a cpu which contains the given flag.
+ */
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+ struct sched_domain *sd;
+
+ for_each_domain(cpu, sd)
+ if (sd && (sd->flags & flag))
+ break;
+
+ return sd;
+}
+
+/**
+ * for_each_flag_domain - Iterates over sched_domains containing the flag.
+ * @cpu: The cpu whose domains we're iterating over.
+ * @sd: variable holding the value of the power_savings_sd
+ * for cpu.
+ * @flag: The flag to filter the sched_domains to be iterated.
+ *
+ * Iterates over all the scheduler domains for a given cpu that has the 'flag'
+ * set, starting from the lowest sched_domain to the highest.
+ */
+#define for_each_flag_domain(cpu, sd, flag) \
+ for (sd = lowest_flag_domain(cpu, flag); \
+ (sd && (sd->flags & flag)); sd = sd->parent)
+
+/**
+ * is_semi_idle_group - Checks if the given sched_group is semi-idle.
+ * @ilb_group: group to be checked for semi-idleness
+ *
+ * Returns: 1 if the group is semi-idle. 0 otherwise.
+ *
+ * We define a sched_group to be semi-idle if it has at least one idle CPU
+ * and at least one non-idle CPU. This helper function checks if the given
+ * sched_group is semi-idle or not.
+ */
+static inline int is_semi_idle_group(struct sched_group *ilb_group)
+{
+ cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
+ sched_group_cpus(ilb_group));
+
+ /*
+ * A sched_group is semi-idle when it has at least one busy cpu
+ * and at least one idle cpu.
+ */
+ if (cpumask_empty(nohz.ilb_grp_nohz_mask))
+ return 0;
+
+ if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
+ return 0;
+
+ return 1;
+}
+/**
+ * find_new_ilb - Finds the optimum idle load balancer for nomination.
+ * @cpu: The cpu which is nominating a new idle_load_balancer.
+ *
+ * Returns: the id of the idle load balancer if it exists,
+ * else >= nr_cpu_ids.
+ *
+ * This algorithm picks the idle load balancer such that it belongs to a
+ * semi-idle powersavings sched_domain. The idea is to try and avoid
+ * completely idle packages/cores just for the purpose of idle load balancing
+ * when there are other idle cpus which are better suited for that job.
+ */
+static int find_new_ilb(int cpu)
+{
+ struct sched_domain *sd;
+ struct sched_group *ilb_group;
+
+ /*
+ * Have idle load balancer selection from semi-idle packages only
+ * when power-aware load balancing is enabled
+ */
+ if (!(sched_smt_power_savings || sched_mc_power_savings))
+ goto out_done;
+
+ /*
+ * Optimize for the case when we have no idle CPUs or only one
+ * idle CPU. Don't walk the sched_domain hierarchy in such cases
+ */
+ if (cpumask_weight(nohz.cpu_mask) < 2)
+ goto out_done;
+
+ for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
+ ilb_group = sd->groups;
+
+ do {
+ if (is_semi_idle_group(ilb_group))
+ return cpumask_first(nohz.ilb_grp_nohz_mask);
+
+ ilb_group = ilb_group->next;
+
+ } while (ilb_group != sd->groups);
+ }
+
+out_done:
+ return cpumask_first(nohz.cpu_mask);
+}
+#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
+static inline int find_new_ilb(int call_cpu)
+{
+ return cpumask_first(nohz.cpu_mask);
+}
+#endif
+
/*
* This routine will try to nominate the ilb (idle load balancing)
* owner among the cpus whose ticks are stopped. ilb owner will do the idle
@@ -4337,8 +4571,24 @@ int select_nohz_load_balancer(int stop_tick)
/* make me the ilb owner */
if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
return 1;
- } else if (atomic_read(&nohz.load_balancer) == cpu)
+ } else if (atomic_read(&nohz.load_balancer) == cpu) {
+ int new_ilb;
+
+ if (!(sched_smt_power_savings ||
+ sched_mc_power_savings))
+ return 1;
+ /*
+ * Check to see if there is a more power-efficient
+ * ilb.
+ */
+ new_ilb = find_new_ilb(cpu);
+ if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
+ atomic_set(&nohz.load_balancer, -1);
+ resched_cpu(new_ilb);
+ return 0;
+ }
return 1;
+ }
} else {
if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
return 0;
@@ -4507,15 +4757,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
}
if (atomic_read(&nohz.load_balancer) == -1) {
- /*
- * simple selection for now: Nominate the
- * first cpu in the nohz list to be the next
- * ilb owner.
- *
- * TBD: Traverse the sched domains and nominate
- * the nearest cpu in the nohz.cpu_mask.
- */
- int ilb = cpumask_first(nohz.cpu_mask);
+ int ilb = find_new_ilb(cpu);
if (ilb < nr_cpu_ids)
resched_cpu(ilb);
@@ -4879,6 +5121,8 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
spin_unlock(&rq->lock);
+ perf_counter_task_tick(curr, cpu);
+
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
trigger_load_balance(rq, cpu);
@@ -5046,13 +5290,15 @@ pick_next_task(struct rq *rq)
/*
* schedule() is the main scheduler function.
*/
-asmlinkage void __sched __schedule(void)
+asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
struct rq *rq;
int cpu;
+need_resched:
+ preempt_disable();
cpu = smp_processor_id();
rq = cpu_rq(cpu);
rcu_qsctr_inc(cpu);
@@ -5092,6 +5338,7 @@ need_resched_nonpreemptible:
if (likely(prev != next)) {
sched_info_switch(prev, next);
+ perf_counter_task_sched_out(prev, next, cpu);
rq->nr_switches++;
rq->curr = next;
@@ -5109,15 +5356,9 @@ need_resched_nonpreemptible:
if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
-}
-asmlinkage void __sched schedule(void)
-{
-need_resched:
- preempt_disable();
- __schedule();
preempt_enable_no_resched();
- if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+ if (need_resched())
goto need_resched;
}
EXPORT_SYMBOL(schedule);
@@ -5260,7 +5501,7 @@ EXPORT_SYMBOL(default_wake_function);
* started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
-void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, int sync, void *key)
{
wait_queue_t *curr, *next;
@@ -5280,6 +5521,9 @@ void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
* @key: is directly passed to the wakeup function
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
*/
void __wake_up(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
@@ -5318,6 +5562,9 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
* with each other. This can prevent needless bouncing between CPUs.
*
* On UP it can prevent extra preemption.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
*/
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
@@ -5354,6 +5601,9 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
* awakened in the same order in which they were queued.
*
* See also complete_all(), wait_for_completion() and related routines.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
*/
void complete(struct completion *x)
{
@@ -5371,6 +5621,9 @@ EXPORT_SYMBOL(complete);
* @x: holds the state of this particular completion
*
* This will wake up all threads waiting on this particular completion event.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
*/
void complete_all(struct completion *x)
{
@@ -6529,8 +6782,9 @@ void sched_show_task(struct task_struct *p)
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
#endif
- printk(KERN_CONT "%5lu %5d %6d\n", free,
- task_pid_nr(p), task_pid_nr(p->real_parent));
+ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+ task_pid_nr(p), task_pid_nr(p->real_parent),
+ (unsigned long)task_thread_info(p)->flags);
show_stack(p, NULL);
}
@@ -7009,6 +7263,14 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
}
}
+
+/*
+ * remove the tasks which were accounted by rq from calc_load_tasks.
+ */
+static void calc_global_load_remove(struct rq *rq)
+{
+ atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+}
#endif /* CONFIG_HOTPLUG_CPU */
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -7243,6 +7505,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
+ rq->calc_load_update = calc_load_update;
+ rq->calc_load_active = 0;
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -7282,7 +7546,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
-
+ calc_global_load_remove(rq);
/*
* No need to migrate the tasks: it was best-effort if
* they didn't take sched_hotcpu_mutex. Just wake up
@@ -7318,8 +7582,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-/* Register at highest priority so that task migration (migrate_all_tasks)
- * happens before everything else.
+/*
+ * Register at high priority so that task migration (migrate_all_tasks)
+ * happens before everything else. This has to be lower priority than
+ * the notifier in the perf_counter subsystem, though.
*/
static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
@@ -7564,24 +7830,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
{
+ gfp_t gfp = GFP_KERNEL;
+
memset(rd, 0, sizeof(*rd));
- if (bootmem) {
- alloc_bootmem_cpumask_var(&def_root_domain.span);
- alloc_bootmem_cpumask_var(&def_root_domain.online);
- alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
- cpupri_init(&rd->cpupri, true);
- return 0;
- }
+ if (bootmem)
+ gfp = GFP_NOWAIT;
- if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+ if (!alloc_cpumask_var(&rd->span, gfp))
goto out;
- if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+ if (!alloc_cpumask_var(&rd->online, gfp))
goto free_span;
- if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&rd->rto_mask, gfp))
goto free_online;
- if (cpupri_init(&rd->cpupri, false) != 0)
+ if (cpupri_init(&rd->cpupri, bootmem) != 0)
goto free_rto_mask;
return 0;
@@ -7792,8 +8055,9 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
/*
* The cpus mask in sched_group and sched_domain hangs off the end.
- * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
- * for nr_cpu_ids < CONFIG_NR_CPUS.
+ *
+ * ( See the comments in include/linux/sched.h:struct sched_group
+ * and struct sched_domain. )
*/
struct static_sched_group {
struct sched_group sg;
@@ -7914,7 +8178,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
struct sched_domain *sd;
sd = &per_cpu(phys_domains, j).sd;
- if (j != cpumask_first(sched_group_cpus(sd->groups))) {
+ if (j != group_first_cpu(sd->groups)) {
/*
* Only add "power" once for each
* physical package.
@@ -7992,7 +8256,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
WARN_ON(!sd || !sd->groups);
- if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
+ if (cpu != group_first_cpu(sd->groups))
return;
child = sd->child;
@@ -8770,6 +9034,8 @@ void __init sched_init_smp(void)
}
#endif /* CONFIG_SMP */
+const_debug unsigned int sysctl_timer_migration = 1;
+
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
@@ -8904,7 +9170,7 @@ void __init sched_init(void)
* we use alloc_bootmem().
*/
if (alloc_size) {
- ptr = (unsigned long)alloc_bootmem(alloc_size);
+ ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
#ifdef CONFIG_FAIR_GROUP_SCHED
init_task_group.se = (struct sched_entity **)ptr;
@@ -8977,6 +9243,8 @@ void __init sched_init(void)
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
rq->nr_running = 0;
+ rq->calc_load_active = 0;
+ rq->calc_load_update = jiffies + LOAD_FREQ;
init_cfs_rq(&rq->cfs, rq);
init_rt_rq(&rq->rt, rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -8997,7 +9265,7 @@ void __init sched_init(void)
* 1024) and two child groups A0 and A1 (of weight 1024 each),
* then A0's share of the cpu resource is:
*
- * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+ * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
*
* We achieve this by letting init_task_group's tasks sit
* directly in rq->cfs (i.e init_task_group->se[] = NULL).
@@ -9084,20 +9352,26 @@ void __init sched_init(void)
* when this runqueue becomes "idle".
*/
init_idle(current, smp_processor_id());
+
+ calc_load_update = jiffies + LOAD_FREQ;
+
/*
* During early bootup we pretend to be a normal task:
*/
current->sched_class = &fair_sched_class;
/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
- alloc_bootmem_cpumask_var(&nohz_cpu_mask);
+ alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ
- alloc_bootmem_cpumask_var(&nohz.cpu_mask);
+ alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+ alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
#endif
- alloc_bootmem_cpumask_var(&cpu_isolated_map);
+ alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */
+ perf_counter_init();
+
scheduler_running = 1;
}
@@ -9839,6 +10113,13 @@ static int sched_rt_global_constraints(void)
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
+ /*
+ * There are always some RT tasks in the root group
+ * -- migration, kstopmachine etc.
+ */
+ if (sysctl_sched_rt_runtime == 0)
+ return -EBUSY;
+
spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 819f17ac796e..e1d16c9a7680 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -38,7 +38,8 @@
*/
unsigned long long __attribute__((weak)) sched_clock(void)
{
- return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+ return (unsigned long long)(jiffies - INITIAL_JIFFIES)
+ * (NSEC_PER_SEC / HZ);
}
static __read_mostly int sched_clock_running;
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index cdd3c89574cd..7deffc9f0e5f 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -154,8 +154,12 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
*/
int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
{
+ gfp_t gfp = GFP_KERNEL;
int i;
+ if (bootmem)
+ gfp = GFP_NOWAIT;
+
memset(cp, 0, sizeof(*cp));
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
@@ -163,9 +167,7 @@ int __init_refok cpupri_init(struct cpupri *cp, bool bootmem)
spin_lock_init(&vec->lock);
vec->count = 0;
- if (bootmem)
- alloc_bootmem_cpumask_var(&vec->mask);
- else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&vec->mask, gfp))
goto cleanup;
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3816f217f119..5f9650e8fe75 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1487,17 +1487,10 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
find_matching_se(&se, &pse);
- while (se) {
- BUG_ON(!pse);
+ BUG_ON(!pse);
- if (wakeup_preempt_entity(se, pse) == 1) {
- resched_task(curr);
- break;
- }
-
- se = parent_entity(se);
- pse = parent_entity(pse);
- }
+ if (wakeup_preempt_entity(se, pse) == 1)
+ resched_task(curr);
}
static struct task_struct *pick_next_task_fair(struct rq *rq)
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 8a21a2e28c13..499672c10cbd 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -22,7 +22,8 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sy
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
schedstat_inc(rq, sched_goidle);
-
+ /* adjust the active tasks as we might go into a long sleep */
+ calc_load_account_active(rq);
return rq->idle;
}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f2c66f8f9712..9bf0d2a73045 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1591,7 +1591,7 @@ static inline void init_sched_rt_class(void)
unsigned int i;
for_each_possible_cpu(i)
- alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+ zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */
diff --git a/kernel/signal.c b/kernel/signal.c
index 94ec0a4dde0f..d81f4952eebb 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -247,14 +247,19 @@ void flush_sigqueue(struct sigpending *queue)
/*
* Flush all pending signals for a task.
*/
+void __flush_signals(struct task_struct *t)
+{
+ clear_tsk_thread_flag(t, TIF_SIGPENDING);
+ flush_sigqueue(&t->pending);
+ flush_sigqueue(&t->signal->shared_pending);
+}
+
void flush_signals(struct task_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&t->sighand->siglock, flags);
- clear_tsk_thread_flag(t, TIF_SIGPENDING);
- flush_sigqueue(&t->pending);
- flush_sigqueue(&t->signal->shared_pending);
+ __flush_signals(t);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
@@ -827,6 +832,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
{
struct sigpending *pending;
struct sigqueue *q;
+ int override_rlimit;
trace_sched_signal_send(sig, t);
@@ -858,9 +864,13 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
make sure at least one signal gets delivered and don't
pass on the info struct. */
- q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
- (is_si_special(info) ||
- info->si_code >= 0)));
+ if (sig < SIGRTMIN)
+ override_rlimit = (is_si_special(info) || info->si_code >= 0);
+ else
+ override_rlimit = 0;
+
+ q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
+ override_rlimit);
if (q) {
list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
@@ -2276,24 +2286,17 @@ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
return kill_something_info(sig, &info, pid);
}
-static int do_tkill(pid_t tgid, pid_t pid, int sig)
+static int
+do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
- int error;
- struct siginfo info;
struct task_struct *p;
unsigned long flags;
-
- error = -ESRCH;
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_code = SI_TKILL;
- info.si_pid = task_tgid_vnr(current);
- info.si_uid = current_uid();
+ int error = -ESRCH;
rcu_read_lock();
p = find_task_by_vpid(pid);
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
- error = check_kill_permission(sig, &info, p);
+ error = check_kill_permission(sig, info, p);
/*
* The null signal is a permissions and process existence
* probe. No signal is actually delivered.
@@ -2303,7 +2306,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
* signal is private anyway.
*/
if (!error && sig && lock_task_sighand(p, &flags)) {
- error = specific_send_sig_info(sig, &info, p);
+ error = specific_send_sig_info(sig, info, p);
unlock_task_sighand(p, &flags);
}
}
@@ -2312,6 +2315,19 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
return error;
}
+static int do_tkill(pid_t tgid, pid_t pid, int sig)
+{
+ struct siginfo info;
+
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = SI_TKILL;
+ info.si_pid = task_tgid_vnr(current);
+ info.si_uid = current_uid();
+
+ return do_send_specific(tgid, pid, sig, &info);
+}
+
/**
* sys_tgkill - send signal to one specific thread
* @tgid: the thread group ID of the thread
@@ -2361,6 +2377,32 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
return kill_proc_info(sig, &info, pid);
}
+long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
+{
+ /* This is only valid for single tasks */
+ if (pid <= 0 || tgid <= 0)
+ return -EINVAL;
+
+ /*
+ * Not even root can pretend to send signals from the kernel.
+ * Nor can they impersonate a kill(), which adds source info.
+ */
+ if (info->si_code >= 0)
+ return -EPERM;
+ info->si_signo = sig;
+
+ return do_send_specific(tgid, pid, sig, info);
+}
+
+SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
+ siginfo_t __user *, uinfo)
+{
+ siginfo_t info;
+
+ if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
+ return -EFAULT;
+
+ return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
+}
+
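A minimal userspace sketch of queueing a signal with a payload to one specific thread through the new syscall. It assumes the libc does not yet provide a wrapper, that __NR_rt_tgsigqueueinfo is available from the installed kernel headers, and that tgsigqueue() is a made-up name:

#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int tgsigqueue(pid_t tgid, pid_t tid, int sig, int value)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));
	info.si_signo = sig;
	info.si_code = SI_QUEUE;	/* must be < 0, see the check above */
	info.si_pid = getpid();
	info.si_uid = getuid();
	info.si_value.sival_int = value;

	return syscall(__NR_rt_tgsigqueueinfo, tgid, tid, sig, &info);
}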
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
struct task_struct *t = current;
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index b28d19135f43..09d7519557d3 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -319,6 +319,15 @@ cant_get_ref:
EXPORT_SYMBOL(slow_work_enqueue);
/*
+ * Schedule a cull of the thread pool at some time in the near future
+ */
+static void slow_work_schedule_cull(void)
+{
+ mod_timer(&slow_work_cull_timer,
+ round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
+}
+
+/*
* Worker thread culling algorithm
*/
static bool slow_work_cull_thread(void)
@@ -335,8 +344,7 @@ static bool slow_work_cull_thread(void)
list_empty(&vslow_work_queue) &&
atomic_read(&slow_work_thread_count) >
slow_work_min_threads) {
- mod_timer(&slow_work_cull_timer,
- jiffies + SLOW_WORK_CULL_TIMEOUT);
+ slow_work_schedule_cull();
do_cull = true;
}
}
@@ -372,8 +380,8 @@ static int slow_work_thread(void *_data)
vsmax *= atomic_read(&slow_work_thread_count);
vsmax /= 100;
- prepare_to_wait(&slow_work_thread_wq, &wait,
- TASK_INTERRUPTIBLE);
+ prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
+ TASK_INTERRUPTIBLE);
if (!freezing(current) &&
!slow_work_threads_should_exit &&
!slow_work_available(vsmax) &&
@@ -393,8 +401,7 @@ static int slow_work_thread(void *_data)
list_empty(&vslow_work_queue) &&
atomic_read(&slow_work_thread_count) >
slow_work_min_threads)
- mod_timer(&slow_work_cull_timer,
- jiffies + SLOW_WORK_CULL_TIMEOUT);
+ slow_work_schedule_cull();
continue;
}
@@ -458,7 +465,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
if (atomic_dec_and_test(&slow_work_thread_count))
BUG(); /* we're running on a slow work thread... */
mod_timer(&slow_work_oom_timer,
- jiffies + SLOW_WORK_OOM_TIMEOUT);
+ round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
} else {
/* ratelimit the starting of new threads */
mod_timer(&slow_work_oom_timer, jiffies + 1);
@@ -502,8 +509,7 @@ static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
if (n < 0 && !slow_work_may_not_start_new_thread)
slow_work_enqueue(&slow_work_new_thread);
else if (n > 0)
- mod_timer(&slow_work_cull_timer,
- jiffies + SLOW_WORK_CULL_TIMEOUT);
+ slow_work_schedule_cull();
}
mutex_unlock(&slow_work_user_lock);
}
@@ -529,8 +535,7 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
atomic_read(&slow_work_thread_count);
if (n < 0)
- mod_timer(&slow_work_cull_timer,
- jiffies + SLOW_WORK_CULL_TIMEOUT);
+ slow_work_schedule_cull();
}
mutex_unlock(&slow_work_user_lock);
}
diff --git a/kernel/smp.c b/kernel/smp.c
index 858baac568ee..ad63d8501207 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -52,7 +52,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+ if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
cpu_to_node(cpu)))
return NOTIFY_BAD;
break;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index dc4d0cfdcb2d..b41fb710e114 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -382,6 +382,17 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
EXPORT_SYMBOL(__tasklet_hi_schedule);
+void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+ BUG_ON(!irqs_disabled());
+
+ t->next = __get_cpu_var(tasklet_hi_vec).head;
+ __get_cpu_var(tasklet_hi_vec).head = t;
+ __raise_softirq_irqoff(HI_SOFTIRQ);
+}
+
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
static void tasklet_action(struct softirq_action *a)
{
struct tasklet_struct *list;
@@ -827,7 +838,7 @@ int __init __weak arch_early_irq_init(void)
return 0;
}
-int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
+int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
return 0;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index e7998cf31498..b3f1097c76fa 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -14,6 +14,7 @@
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
+#include <linux/perf_counter.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
@@ -1112,289 +1113,6 @@ out:
return err;
}
-/*
- * Supplementary group IDs
- */
-
-/* init to 2 - one for init_task, one to ensure it is never freed */
-struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
-
-struct group_info *groups_alloc(int gidsetsize)
-{
- struct group_info *group_info;
- int nblocks;
- int i;
-
- nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
- /* Make sure we always allocate at least one indirect block pointer */
- nblocks = nblocks ? : 1;
- group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
- if (!group_info)
- return NULL;
- group_info->ngroups = gidsetsize;
- group_info->nblocks = nblocks;
- atomic_set(&group_info->usage, 1);
-
- if (gidsetsize <= NGROUPS_SMALL)
- group_info->blocks[0] = group_info->small_block;
- else {
- for (i = 0; i < nblocks; i++) {
- gid_t *b;
- b = (void *)__get_free_page(GFP_USER);
- if (!b)
- goto out_undo_partial_alloc;
- group_info->blocks[i] = b;
- }
- }
- return group_info;
-
-out_undo_partial_alloc:
- while (--i >= 0) {
- free_page((unsigned long)group_info->blocks[i]);
- }
- kfree(group_info);
- return NULL;
-}
-
-EXPORT_SYMBOL(groups_alloc);
-
-void groups_free(struct group_info *group_info)
-{
- if (group_info->blocks[0] != group_info->small_block) {
- int i;
- for (i = 0; i < group_info->nblocks; i++)
- free_page((unsigned long)group_info->blocks[i]);
- }
- kfree(group_info);
-}
-
-EXPORT_SYMBOL(groups_free);
-
-/* export the group_info to a user-space array */
-static int groups_to_user(gid_t __user *grouplist,
- const struct group_info *group_info)
-{
- int i;
- unsigned int count = group_info->ngroups;
-
- for (i = 0; i < group_info->nblocks; i++) {
- unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
- unsigned int len = cp_count * sizeof(*grouplist);
-
- if (copy_to_user(grouplist, group_info->blocks[i], len))
- return -EFAULT;
-
- grouplist += NGROUPS_PER_BLOCK;
- count -= cp_count;
- }
- return 0;
-}
-
-/* fill a group_info from a user-space array - it must be allocated already */
-static int groups_from_user(struct group_info *group_info,
- gid_t __user *grouplist)
-{
- int i;
- unsigned int count = group_info->ngroups;
-
- for (i = 0; i < group_info->nblocks; i++) {
- unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
- unsigned int len = cp_count * sizeof(*grouplist);
-
- if (copy_from_user(group_info->blocks[i], grouplist, len))
- return -EFAULT;
-
- grouplist += NGROUPS_PER_BLOCK;
- count -= cp_count;
- }
- return 0;
-}
-
-/* a simple Shell sort */
-static void groups_sort(struct group_info *group_info)
-{
- int base, max, stride;
- int gidsetsize = group_info->ngroups;
-
- for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
- ; /* nothing */
- stride /= 3;
-
- while (stride) {
- max = gidsetsize - stride;
- for (base = 0; base < max; base++) {
- int left = base;
- int right = left + stride;
- gid_t tmp = GROUP_AT(group_info, right);
-
- while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
- GROUP_AT(group_info, right) =
- GROUP_AT(group_info, left);
- right = left;
- left -= stride;
- }
- GROUP_AT(group_info, right) = tmp;
- }
- stride /= 3;
- }
-}
-
-/* a simple bsearch */
-int groups_search(const struct group_info *group_info, gid_t grp)
-{
- unsigned int left, right;
-
- if (!group_info)
- return 0;
-
- left = 0;
- right = group_info->ngroups;
- while (left < right) {
- unsigned int mid = (left+right)/2;
- int cmp = grp - GROUP_AT(group_info, mid);
- if (cmp > 0)
- left = mid + 1;
- else if (cmp < 0)
- right = mid;
- else
- return 1;
- }
- return 0;
-}
-
-/**
- * set_groups - Change a group subscription in a set of credentials
- * @new: The newly prepared set of credentials to alter
- * @group_info: The group list to install
- *
- * Validate a group subscription and, if valid, insert it into a set
- * of credentials.
- */
-int set_groups(struct cred *new, struct group_info *group_info)
-{
- int retval;
-
- retval = security_task_setgroups(group_info);
- if (retval)
- return retval;
-
- put_group_info(new->group_info);
- groups_sort(group_info);
- get_group_info(group_info);
- new->group_info = group_info;
- return 0;
-}
-
-EXPORT_SYMBOL(set_groups);
-
-/**
- * set_current_groups - Change current's group subscription
- * @group_info: The group list to impose
- *
- * Validate a group subscription and, if valid, impose it upon current's task
- * security record.
- */
-int set_current_groups(struct group_info *group_info)
-{
- struct cred *new;
- int ret;
-
- new = prepare_creds();
- if (!new)
- return -ENOMEM;
-
- ret = set_groups(new, group_info);
- if (ret < 0) {
- abort_creds(new);
- return ret;
- }
-
- return commit_creds(new);
-}
-
-EXPORT_SYMBOL(set_current_groups);
-
-SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
-{
- const struct cred *cred = current_cred();
- int i;
-
- if (gidsetsize < 0)
- return -EINVAL;
-
- /* no need to grab task_lock here; it cannot change */
- i = cred->group_info->ngroups;
- if (gidsetsize) {
- if (i > gidsetsize) {
- i = -EINVAL;
- goto out;
- }
- if (groups_to_user(grouplist, cred->group_info)) {
- i = -EFAULT;
- goto out;
- }
- }
-out:
- return i;
-}
-
-/*
- * SMP: Our groups are copy-on-write. We can set them safely
- * without another task interfering.
- */
-
-SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
-{
- struct group_info *group_info;
- int retval;
-
- if (!capable(CAP_SETGID))
- return -EPERM;
- if ((unsigned)gidsetsize > NGROUPS_MAX)
- return -EINVAL;
-
- group_info = groups_alloc(gidsetsize);
- if (!group_info)
- return -ENOMEM;
- retval = groups_from_user(group_info, grouplist);
- if (retval) {
- put_group_info(group_info);
- return retval;
- }
-
- retval = set_current_groups(group_info);
- put_group_info(group_info);
-
- return retval;
-}
-
-/*
- * Check whether we're fsgid/egid or in the supplemental group..
- */
-int in_group_p(gid_t grp)
-{
- const struct cred *cred = current_cred();
- int retval = 1;
-
- if (grp != cred->fsgid)
- retval = groups_search(cred->group_info, grp);
- return retval;
-}
-
-EXPORT_SYMBOL(in_group_p);
-
-int in_egroup_p(gid_t grp)
-{
- const struct cred *cred = current_cred();
- int retval = 1;
-
- if (grp != cred->egid)
- retval = groups_search(cred->group_info, grp);
- return retval;
-}
-
-EXPORT_SYMBOL(in_egroup_p);
-
DECLARE_RWSEM(uts_sem);
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
@@ -1793,6 +1511,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SET_TSC:
error = SET_TSC_CTL(arg2);
break;
+ case PR_TASK_PERF_COUNTERS_DISABLE:
+ error = perf_counter_task_disable();
+ break;
+ case PR_TASK_PERF_COUNTERS_ENABLE:
+ error = perf_counter_task_enable();
+ break;
case PR_GET_TIMERSLACK:
error = current->timer_slack_ns;
break;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 27dad2967387..68320f6b07b5 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -175,3 +175,6 @@ cond_syscall(compat_sys_timerfd_settime);
cond_syscall(compat_sys_timerfd_gettime);
cond_syscall(sys_eventfd);
cond_syscall(sys_eventfd2);
+
+/* performance counters: */
+cond_syscall(sys_perf_counter_open);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b2970d56fb76..ab462b9968d5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -27,6 +27,7 @@
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
+#include <linux/kmemcheck.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -49,6 +50,7 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/slow-work.h>
+#include <linux/perf_counter.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -114,6 +116,7 @@ static int ngroups_max = NGROUPS_MAX;
#ifdef CONFIG_MODULES
extern char modprobe_path[];
+extern int modules_disabled;
#endif
#ifdef CONFIG_CHR_DEV_SG
extern int sg_big_buff;
@@ -326,6 +329,14 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "timer_migration",
+ .data = &sysctl_timer_migration,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#endif
{
.ctl_name = CTL_UNNUMBERED,
@@ -534,6 +545,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dostring,
.strategy = &sysctl_string,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "modules_disabled",
+ .data = &modules_disabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ /* only handle a transition from default "0" to "1" */
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &one,
+ .extra2 = &one,
+ },
#endif
#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
{
@@ -731,6 +753,14 @@ static struct ctl_table kern_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
+ .procname = "bootloader_version",
+ .data = &bootloader_version,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
.procname = "kstack_depth_to_print",
.data = &kstack_depth_to_print,
.maxlen = sizeof(int),
@@ -912,6 +942,43 @@ static struct ctl_table kern_table[] = {
.child = slow_work_sysctls,
},
#endif
+#ifdef CONFIG_PERF_COUNTERS
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "perf_counter_paranoid",
+ .data = &sysctl_perf_counter_paranoid,
+ .maxlen = sizeof(sysctl_perf_counter_paranoid),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "perf_counter_mlock_kb",
+ .data = &sysctl_perf_counter_mlock,
+ .maxlen = sizeof(sysctl_perf_counter_mlock),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "perf_counter_max_sample_rate",
+ .data = &sysctl_perf_counter_sample_rate,
+ .maxlen = sizeof(sysctl_perf_counter_sample_rate),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+#ifdef CONFIG_KMEMCHECK
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "kmemcheck",
+ .data = &kmemcheck_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -1225,7 +1292,6 @@ static struct ctl_table vm_table[] = {
.strategy = &sysctl_jiffies,
},
#endif
-#ifdef CONFIG_SECURITY
{
.ctl_name = CTL_UNNUMBERED,
.procname = "mmap_min_addr",
@@ -1234,7 +1300,6 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &proc_doulongvec_minmax,
},
-#endif
#ifdef CONFIG_NUMA
{
.ctl_name = CTL_UNNUMBERED,
@@ -1272,7 +1337,6 @@ static struct ctl_table vm_table[] = {
.extra2 = &one,
},
#endif
-#ifdef CONFIG_UNEVICTABLE_LRU
{
.ctl_name = CTL_UNNUMBERED,
.procname = "scan_unevictable_pages",
@@ -1281,7 +1345,6 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = &scan_unevictable_handler,
},
-#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index d13be216a790..1ad6dd461119 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -18,6 +18,7 @@
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
+#include <linux/tick.h>
/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
@@ -54,6 +55,7 @@ unsigned long clockevent_delta2ns(unsigned long latch,
return (unsigned long) clc;
}
+EXPORT_SYMBOL_GPL(clockevent_delta2ns);
/**
* clockevents_set_mode - set the operating mode of a clock event device
@@ -187,6 +189,7 @@ void clockevents_register_device(struct clock_event_device *dev)
spin_unlock(&clockevents_lock);
}
+EXPORT_SYMBOL_GPL(clockevents_register_device);
/*
* Noop handler when we shut down an event device
@@ -251,4 +254,15 @@ void clockevents_notify(unsigned long reason, void *arg)
spin_unlock(&clockevents_lock);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
+
+ktime_t clockevents_get_next_event(int cpu)
+{
+ struct tick_device *td;
+ struct clock_event_device *dev;
+
+ td = &per_cpu(tick_cpu_device, cpu);
+ dev = td->evtdev;
+
+ return dev->next_event;
+}
#endif
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ecfd7b5187e0..592bf584d1d2 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -402,9 +402,6 @@ int clocksource_register(struct clocksource *c)
unsigned long flags;
int ret;
- /* save mult_orig on registration */
- c->mult_orig = c->mult;
-
spin_lock_irqsave(&clocksource_lock, flags);
ret = clocksource_enqueue(c);
if (!ret)
@@ -512,6 +509,18 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
}
}
+ /*
+ * Check to make sure we don't switch to a non-highres capable
+ * clocksource if the tick code is in oneshot mode (highres or nohz)
+ */
+ if (tick_oneshot_mode_active() &&
+ !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
+ printk(KERN_WARNING "%s clocksource is not HRT compatible. "
+ "Cannot switch while in HRT/NOHZ mode\n", ovr->name);
+ ovr = NULL;
+ override_name[0] = 0;
+ }
+
/* Reselect, when the override name has changed */
if (ovr != clocksource_override) {
clocksource_override = ovr;
@@ -540,7 +549,13 @@ sysfs_show_available_clocksources(struct sys_device *dev,
spin_lock_irq(&clocksource_lock);
list_for_each_entry(src, &clocksource_list, list) {
- count += snprintf(buf + count,
+ /*
+ * Don't show non-HRES clocksource if the tick code is
+ * in one shot mode (highres=on or nohz=on)
+ */
+ if (!tick_oneshot_mode_active() ||
+ (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
+ count += snprintf(buf + count,
max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
"%s ", src->name);
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 118a3b3b3f9a..877dbedc3118 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -27,7 +27,7 @@
* timer stops in C3 state.
*/
-struct tick_device tick_broadcast_device;
+static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 2e8de678e767..a96c0e2b89cf 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -128,6 +128,23 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
return 0;
}
+/**
+ * tick_oneshot_mode_active - check whether the system is in oneshot mode
+ *
+ * Returns 1 when either nohz or highres are enabled, otherwise 0.
+ */
+int tick_oneshot_mode_active(void)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
#ifdef CONFIG_HIGH_RES_TIMERS
/**
* tick_init_highres - switch to high resolution mode
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d3f1ef4d5cbe..2aff39c6f10c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -349,7 +349,7 @@ void tick_nohz_stop_sched_tick(int inidle)
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, expires,
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_ABS_PINNED);
/* Check, if the timer was already in the past */
if (hrtimer_active(&ts->sched_timer))
goto out;
@@ -395,7 +395,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_ABS_PINNED);
/* Check, if the timer was already in the past */
if (hrtimer_active(&ts->sched_timer))
break;
@@ -698,7 +698,8 @@ void tick_setup_sched_timer(void)
for (;;) {
hrtimer_forward(&ts->sched_timer, now, tick_period);
- hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS);
+ hrtimer_start_expires(&ts->sched_timer,
+ HRTIMER_MODE_ABS_PINNED);
/* Check, if the timer was already in the past */
if (hrtimer_active(&ts->sched_timer))
break;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 687dff49f6e7..e8c77d9c633a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -22,7 +22,7 @@
/*
* This read-write spinlock protects us from races in SMP while
- * playing with xtime and avenrun.
+ * playing with xtime.
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
@@ -77,6 +77,10 @@ static void clocksource_forward_now(void)
clock->cycle_last = cycle_now;
nsec = cyc2ns(clock, cycle_delta);
+
+ /* If arch requires, add in gettimeoffset() */
+ nsec += arch_gettimeoffset();
+
timespec_add_ns(&xtime, nsec);
nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
@@ -111,6 +115,9 @@ void getnstimeofday(struct timespec *ts)
/* convert to nanoseconds: */
nsecs = cyc2ns(clock, cycle_delta);
+ /* If arch requires, add in gettimeoffset() */
+ nsecs += arch_gettimeoffset();
+
} while (read_seqretry(&xtime_lock, seq));
timespec_add_ns(ts, nsecs);
diff --git a/kernel/timer.c b/kernel/timer.c
index cffffad01c31..54d3912f8cad 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,6 +37,8 @@
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
+#include <linux/perf_counter.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -604,13 +606,12 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
}
static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+__mod_timer(struct timer_list *timer, unsigned long expires,
+ bool pending_only, int pinned)
{
struct tvec_base *base, *new_base;
unsigned long flags;
- int ret;
-
- ret = 0;
+ int ret = 0, cpu;
timer_stats_timer_set_start_info(timer);
BUG_ON(!timer->function);
@@ -629,6 +630,18 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
new_base = __get_cpu_var(tvec_bases);
+ cpu = smp_processor_id();
+
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+ if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
+ int preferred_cpu = get_nohz_load_balancer();
+
+ if (preferred_cpu >= 0)
+ cpu = preferred_cpu;
+ }
+#endif
+ new_base = per_cpu(tvec_bases, cpu);
+
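The migration above only triggers for timers armed from an idle CPU when the timer is not pinned and the runtime knob read by get_sysctl_timer_migration() is enabled; on kernels of this era that knob presumably surfaces as the kernel.timer_migration sysctl, so the behaviour can be switched off when keeping timers on the arming CPU is preferred. get_nohz_load_balancer() nominates the CPU that stays busy doing nohz load balancing, so placing the timer there avoids waking an otherwise idle CPU.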
if (base != new_base) {
/*
* We are trying to schedule the timer on the local CPU.
@@ -668,7 +681,7 @@ out_unlock:
*/
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
- return __mod_timer(timer, expires, true);
+ return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
@@ -702,11 +715,33 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
if (timer->expires == expires && timer_pending(timer))
return 1;
- return __mod_timer(timer, expires, false);
+ return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
/**
+ * mod_timer_pinned - modify a timer's timeout
+ * @timer: the timer to be modified
+ * @expires: new timeout in jiffies
+ *
+ * mod_timer_pinned() is a way to update the expire field of an
+ * active timer (if the timer is inactive it will be activated)
+ * and not allow the timer to be migrated to a different CPU.
+ *
+ * mod_timer_pinned(timer, expires) is equivalent to:
+ *
+ * del_timer(timer); timer->expires = expires; add_timer(timer);
+ */
+int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
+{
+ if (timer->expires == expires && timer_pending(timer))
+ return 1;
+
+ return __mod_timer(timer, expires, false, TIMER_PINNED);
+}
+EXPORT_SYMBOL(mod_timer_pinned);
+
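As a usage illustration (the watchdog function below is hypothetical, not from this patch), a per-CPU timer that must keep firing on the CPU that armed it would be re-armed with the pinned variant:

/* Sketch: re-arm a per-CPU timer without allowing it to be migrated. */
static void rearm_local_watchdog(struct timer_list *t)
{
	/* Same semantics as mod_timer(), but the timer stays on this CPU. */
	mod_timer_pinned(t, jiffies + HZ);
}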
+/**
* add_timer - start a timer
* @timer: the timer to be added
*
@@ -756,6 +791,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
wake_up_idle_cpu(cpu);
spin_unlock_irqrestore(&base->lock, flags);
}
+EXPORT_SYMBOL_GPL(add_timer_on);
/**
* del_timer - deactivate a timer.
@@ -1015,6 +1051,9 @@ cascade:
index = slot = timer_jiffies & TVN_MASK;
do {
list_for_each_entry(nte, varp->vec + slot, entry) {
+ if (tbase_get_deferrable(nte->base))
+ continue;
+
found = 1;
if (time_before(nte->expires, expires))
expires = nte->expires;
@@ -1123,53 +1162,14 @@ void update_process_times(int user_tick)
}
/*
- * Nr of active tasks - counted in fixed-point numbers
- */
-static unsigned long count_active_tasks(void)
-{
- return nr_active() * FIXED_1;
-}
-
-/*
- * Hmm.. Changed this, as the GNU make sources (load.c) seems to
- * imply that avenrun[] is the standard name for this kind of thing.
- * Nothing else seems to be standardized: the fractional size etc
- * all seem to differ on different machines.
- *
- * Requires xtime_lock to access.
- */
-unsigned long avenrun[3];
-
-EXPORT_SYMBOL(avenrun);
-
-/*
- * calc_load - given tick count, update the avenrun load estimates.
- * This is called while holding a write_lock on xtime_lock.
- */
-static inline void calc_load(unsigned long ticks)
-{
- unsigned long active_tasks; /* fixed-point */
- static int count = LOAD_FREQ;
-
- count -= ticks;
- if (unlikely(count < 0)) {
- active_tasks = count_active_tasks();
- do {
- CALC_LOAD(avenrun[0], EXP_1, active_tasks);
- CALC_LOAD(avenrun[1], EXP_5, active_tasks);
- CALC_LOAD(avenrun[2], EXP_15, active_tasks);
- count += LOAD_FREQ;
- } while (count < 0);
- }
-}
-
-/*
* This function runs timers and the timer-tq in bottom half context.
*/
static void run_timer_softirq(struct softirq_action *h)
{
struct tvec_base *base = __get_cpu_var(tvec_bases);
+ perf_counter_do_pending();
+
hrtimer_run_pending();
if (time_after_eq(jiffies, base->timer_jiffies))
@@ -1187,16 +1187,6 @@ void run_local_timers(void)
}
/*
- * Called by the timer interrupt. xtime_lock must already be taken
- * by the timer IRQ!
- */
-static inline void update_times(unsigned long ticks)
-{
- update_wall_time();
- calc_load(ticks);
-}
-
-/*
* The 64-bit jiffies value is not atomic - you MUST NOT read it
* without sampling the sequence number in xtime_lock.
* jiffies is defined in the linker script...
@@ -1205,7 +1195,8 @@ static inline void update_times(unsigned long ticks)
void do_timer(unsigned long ticks)
{
jiffies_64 += ticks;
- update_times(ticks);
+ update_wall_time();
+ calc_global_load();
}
#ifdef __ARCH_WANT_SYS_ALARM
@@ -1353,7 +1344,7 @@ signed long __sched schedule_timeout(signed long timeout)
expire = timeout + jiffies;
setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
- __mod_timer(&timer, expire, false);
+ __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
schedule();
del_singleshot_timer_sync(&timer);
@@ -1406,37 +1397,17 @@ int do_sysinfo(struct sysinfo *info)
{
unsigned long mem_total, sav_total;
unsigned int mem_unit, bitcount;
- unsigned long seq;
+ struct timespec tp;
memset(info, 0, sizeof(struct sysinfo));
- do {
- struct timespec tp;
- seq = read_seqbegin(&xtime_lock);
-
- /*
- * This is annoying. The below is the same thing
- * posix_get_clock_monotonic() does, but it wants to
- * take the lock which we want to cover the loads stuff
- * too.
- */
-
- getnstimeofday(&tp);
- tp.tv_sec += wall_to_monotonic.tv_sec;
- tp.tv_nsec += wall_to_monotonic.tv_nsec;
- monotonic_to_bootbased(&tp);
- if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
- tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
- tp.tv_sec++;
- }
- info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
+ ktime_get_ts(&tp);
+ monotonic_to_bootbased(&tp);
+ info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
- info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
- info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
- info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
+ get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
- info->procs = nr_threads;
- } while (read_seqretry(&xtime_lock, seq));
+ info->procs = nr_threads;
si_meminfo(info);
si_swapinfo(info);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d7f01e6e8ba5..ae048a2dbbe8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -56,6 +56,13 @@ config CONTEXT_SWITCH_TRACER
select MARKERS
bool
+# All tracer options should select GENERIC_TRACER. The options that are
+# enabled by all tracers (context switch and event tracer) select TRACING
+# instead. This allows those options to appear when no other tracer is
+# selected, but keeps them hidden when something else selects them. We need
+# the two options GENERIC_TRACER and TRACING to avoid circular dependencies
+# while accomplishing the hiding of the automatic options.
+
config TRACING
bool
select DEBUG_FS
@@ -66,6 +73,10 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
+config GENERIC_TRACER
+ bool
+ select TRACING
+
#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
@@ -95,7 +106,7 @@ config FUNCTION_TRACER
depends on HAVE_FUNCTION_TRACER
select FRAME_POINTER
select KALLSYMS
- select TRACING
+ select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
help
Enable the kernel to trace every kernel function. This is done
@@ -126,7 +137,7 @@ config IRQSOFF_TRACER
depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME
select TRACE_IRQFLAGS
- select TRACING
+ select GENERIC_TRACER
select TRACER_MAX_TRACE
help
This option measures the time spent in irqs-off critical
@@ -136,7 +147,7 @@ config IRQSOFF_TRACER
disabled by default and can be runtime (re-)started
via:
- echo 0 > /debugfs/tracing/tracing_max_latency
+ echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
(Note that kernel size and overhead increases with this option
enabled. This option and the preempt-off timing option can be
@@ -147,7 +158,7 @@ config PREEMPT_TRACER
default n
depends on GENERIC_TIME
depends on PREEMPT
- select TRACING
+ select GENERIC_TRACER
select TRACER_MAX_TRACE
help
This option measures the time spent in preemption off critical
@@ -157,7 +168,7 @@ config PREEMPT_TRACER
disabled by default and can be runtime (re-)started
via:
- echo 0 > /debugfs/tracing/tracing_max_latency
+ echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
(Note that kernel size and overhead increases with this option
enabled. This option and the irqs-off timing option can be
@@ -166,7 +177,7 @@ config PREEMPT_TRACER
config SYSPROF_TRACER
bool "Sysprof Tracer"
depends on X86
- select TRACING
+ select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
help
This tracer provides the trace needed by the 'Sysprof' userspace
@@ -174,44 +185,33 @@ config SYSPROF_TRACER
config SCHED_TRACER
bool "Scheduling Latency Tracer"
- select TRACING
+ select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
select TRACER_MAX_TRACE
help
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
-config ENABLE_CONTEXT_SWITCH_TRACER
- bool "Trace process context switches"
- select TRACING
- select CONTEXT_SWITCH_TRACER
- help
- This tracer gets called from the context switch and records
- all switching of tasks.
-
-config ENABLE_EVENT_TRACING
- bool "Trace various events in the kernel"
+config ENABLE_DEFAULT_TRACERS
+ bool "Trace process context switches and events"
+ depends on !GENERIC_TRACER
select TRACING
help
This tracer hooks to various trace points in the kernel
allowing the user to pick and choose which trace point they
- want to trace.
-
- Note, all tracers enable event tracing. This option is
- only a convenience to enable event tracing when no other
- tracers are selected.
+ want to trace. It also includes the sched_switch tracer plugin.
config FTRACE_SYSCALLS
bool "Trace syscalls"
depends on HAVE_FTRACE_SYSCALLS
- select TRACING
+ select GENERIC_TRACER
select KALLSYMS
help
Basic tracer to catch the syscall entry and exit events.
config BOOT_TRACER
bool "Trace boot initcalls"
- select TRACING
+ select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
help
This tracer helps developers to optimize boot times: it records
@@ -228,7 +228,7 @@ config BOOT_TRACER
config TRACE_BRANCH_PROFILING
bool
- select TRACING
+ select GENERIC_TRACER
choice
prompt "Branch Profiling"
@@ -261,7 +261,7 @@ config PROFILE_ANNOTATED_BRANCHES
This tracer profiles all the likely and unlikely macros
in the kernel. It will display the results in:
- /debugfs/tracing/profile_annotated_branch
+ /sys/kernel/debug/tracing/profile_annotated_branch
Note: this will add a significant overhead, only turn this
on if you need to profile the system's use of these macros.
@@ -274,7 +274,7 @@ config PROFILE_ALL_BRANCHES
taken in the kernel is recorded whether it hit or miss.
The results will be displayed in:
- /debugfs/tracing/profile_branch
+ /sys/kernel/debug/tracing/profile_branch
This option also enables the likely/unlikely profiler.
@@ -308,7 +308,7 @@ config BRANCH_TRACER
config POWER_TRACER
bool "Trace power consumption behavior"
depends on X86
- select TRACING
+ select GENERIC_TRACER
help
This tracer helps developers to analyze and optimize the kernels
power management decisions, specifically the C-state and P-state
@@ -344,7 +344,7 @@ config STACK_TRACER
select KALLSYMS
help
This special tracer records the maximum stack footprint of the
- kernel and displays it in debugfs/tracing/stack_trace.
+ kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
This tracer works by hooking into every function call that the
kernel executes, and keeping a maximum stack depth value and
@@ -363,14 +363,14 @@ config STACK_TRACER
config HW_BRANCH_TRACER
depends on HAVE_HW_BRANCH_TRACER
bool "Trace hw branches"
- select TRACING
+ select GENERIC_TRACER
help
This tracer records all branches on the system in a circular
buffer giving access to the last N branches for each cpu.
config KMEMTRACE
bool "Trace SLAB allocations"
- select TRACING
+ select GENERIC_TRACER
help
kmemtrace provides tracing for slab allocator functions, such as
kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
@@ -390,7 +390,7 @@ config KMEMTRACE
config WORKQUEUE_TRACER
bool "Trace workqueues"
- select TRACING
+ select GENERIC_TRACER
help
The workqueue tracer provides some statistical information
about each cpu workqueue thread such as the number of the
@@ -406,7 +406,7 @@ config BLK_DEV_IO_TRACE
select RELAY
select DEBUG_FS
select TRACEPOINTS
- select TRACING
+ select GENERIC_TRACER
select STACKTRACE
help
Say Y here if you want to be able to trace the block layer actions
@@ -467,7 +467,7 @@ config FTRACE_SELFTEST
config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace"
- depends on TRACING
+ depends on GENERIC_TRACER
select FTRACE_SELFTEST
help
This option performs a series of startup tests on ftrace. On bootup
@@ -478,7 +478,7 @@ config FTRACE_STARTUP_TEST
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
- select TRACING
+ select GENERIC_TRACER
help
Mmiotrace traces Memory Mapped I/O access and is meant for
debugging and reverse engineering. It is called from the ioremap
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 658aace8c41e..ce3b1cd02732 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -45,7 +45,10 @@ obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
obj-$(CONFIG_POWER_TRACER) += trace_power.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
-obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
+obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
+ifeq ($(CONFIG_BLOCK),y)
+obj-$(CONFIG_EVENT_TRACING) += blktrace.o
+endif
obj-$(CONFIG_EVENT_TRACING) += trace_events.o
obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index e3abf55bc8e5..39af8af6fc30 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -23,10 +23,14 @@
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
-#include <trace/block.h>
#include <linux/uaccess.h>
+
+#include <trace/events/block.h>
+
#include "trace_output.h"
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+
static unsigned int blktrace_seq __read_mostly = 1;
static struct trace_array *blk_tr;
@@ -665,12 +669,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC);
- __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors,
- rq->cmd_len, rq->cmd);
+ __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
+ what, rq->errors, rq->cmd_len, rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
- rw, what, rq->errors, 0, NULL);
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
+ what, rq->errors, 0, NULL);
}
}
@@ -877,11 +881,11 @@ void blk_add_driver_data(struct request_queue *q,
return;
if (blk_pc_request(rq))
- __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
- rq->errors, len, data);
+ __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
+ BLK_TA_DRV_DATA, rq->errors, len, data);
else
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
- 0, BLK_TA_DRV_DATA, rq->errors, len, data);
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
+ BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1658,3 +1662,72 @@ int blk_trace_init_sysfs(struct device *dev)
return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#ifdef CONFIG_EVENT_TRACING
+
+void blk_dump_cmd(char *buf, struct request *rq)
+{
+ int i, end;
+ int len = rq->cmd_len;
+ unsigned char *cmd = rq->cmd;
+
+ if (!blk_pc_request(rq)) {
+ buf[0] = '\0';
+ return;
+ }
+
+ for (end = len - 1; end >= 0; end--)
+ if (cmd[end])
+ break;
+ end++;
+
+ for (i = 0; i < len; i++) {
+ buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
+ if (i == end && end != len - 1) {
+ sprintf(buf, " ..");
+ break;
+ }
+ }
+}
+
+void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
+{
+ int i = 0;
+
+ if (rw & WRITE)
+ rwbs[i++] = 'W';
+ else if (rw & 1 << BIO_RW_DISCARD)
+ rwbs[i++] = 'D';
+ else if (bytes)
+ rwbs[i++] = 'R';
+ else
+ rwbs[i++] = 'N';
+
+ if (rw & 1 << BIO_RW_AHEAD)
+ rwbs[i++] = 'A';
+ if (rw & 1 << BIO_RW_BARRIER)
+ rwbs[i++] = 'B';
+ if (rw & 1 << BIO_RW_SYNCIO)
+ rwbs[i++] = 'S';
+ if (rw & 1 << BIO_RW_META)
+ rwbs[i++] = 'M';
+
+ rwbs[i] = '\0';
+}
+
+void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
+{
+ int rw = rq->cmd_flags & 0x03;
+ int bytes;
+
+ if (blk_discard_rq(rq))
+ rw |= (1 << BIO_RW_DISCARD);
+
+ bytes = blk_rq_bytes(rq);
+
+ blk_fill_rwbs(rwbs, rw, bytes);
+}
+
+#endif /* CONFIG_EVENT_TRACING */
+
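For reference, the flag-to-letter mapping above renders a synchronous write as "WS" and a readahead read as "RA". A minimal call site, assuming the request pointer comes from the surrounding tracepoint and using an illustrative function name:

/* Sketch: decode a request's flags into the familiar blktrace rwbs string. */
static void my_dump_rq_flags(struct request *rq)
{
	char rwbs[8];	/* longest combination is five letters plus the NUL */

	blk_fill_rwbs_rq(rwbs, rq);
	pr_debug("request decodes to %s\n", rwbs);	/* e.g. "WS" for a sync write */
}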
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 140699a9a8a7..bb60732ade0c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -32,6 +32,7 @@
#include <trace/events/sched.h>
#include <asm/ftrace.h>
+#include <asm/setup.h>
#include "trace_output.h"
#include "trace_stat.h"
@@ -598,7 +599,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
local_irq_save(flags);
stat = &__get_cpu_var(ftrace_profile_stats);
- if (!stat->hash)
+ if (!stat->hash || !ftrace_profile_enabled)
goto out;
rec = ftrace_find_profiled_func(stat, ip);
@@ -629,7 +630,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags);
stat = &__get_cpu_var(ftrace_profile_stats);
- if (!stat->hash)
+ if (!stat->hash || !ftrace_profile_enabled)
goto out;
calltime = trace->rettime - trace->calltime;
@@ -723,6 +724,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
ftrace_profile_enabled = 1;
} else {
ftrace_profile_enabled = 0;
+ /*
+ * unregister_ftrace_profiler calls stop_machine
+ * so this acts like a synchronize_sched.
+ */
unregister_ftrace_profiler();
}
}
@@ -2369,6 +2374,45 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
ftrace_set_regex(buf, len, reset, 0);
}
+/*
+ * command line interface to allow users to set filters on boot up.
+ */
+#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
+static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
+
+static int __init set_ftrace_notrace(char *str)
+{
+ strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+ return 1;
+}
+__setup("ftrace_notrace=", set_ftrace_notrace);
+
+static int __init set_ftrace_filter(char *str)
+{
+ strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+ return 1;
+}
+__setup("ftrace_filter=", set_ftrace_filter);
+
+static void __init set_ftrace_early_filter(char *buf, int enable)
+{
+ char *func;
+
+ while (buf) {
+ func = strsep(&buf, ",");
+ ftrace_set_regex(func, strlen(func), 0, enable);
+ }
+}
+
+static void __init set_ftrace_early_filters(void)
+{
+ if (ftrace_filter_buf[0])
+ set_ftrace_early_filter(ftrace_filter_buf, 1);
+ if (ftrace_notrace_buf[0])
+ set_ftrace_early_filter(ftrace_notrace_buf, 0);
+}
+
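Given the strsep() parsing above, both parameters take a comma-separated list of function names on the kernel command line, for example (function names chosen for illustration only): ftrace_filter=schedule,vfs_read together with ftrace_notrace=rcu_read_lock. Each entry is handed to ftrace_set_regex(), the same helper the set_ftrace_filter interface uses.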
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
@@ -2829,6 +2873,8 @@ void __init ftrace_init(void)
if (ret)
pr_warning("Failed to register trace ftrace module notifier\n");
+ set_ftrace_early_filters();
+
return;
failed:
ftrace_disabled = 1;
@@ -3172,12 +3218,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
}
if (t->ret_stack == NULL) {
- t->curr_ret_stack = -1;
- /* Make sure IRQs see the -1 first: */
- barrier();
- t->ret_stack = ret_stack_list[start++];
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
+ t->curr_ret_stack = -1;
+ /* Make sure the tasks see the -1 first: */
+ smp_wmb();
+ t->ret_stack = ret_stack_list[start++];
}
} while_each_thread(g, t);
@@ -3235,8 +3281,10 @@ static int start_graph_tracing(void)
return -ENOMEM;
/* The cpu_boot init_task->ret_stack will never be freed */
- for_each_online_cpu(cpu)
- ftrace_graph_init_task(idle_task(cpu));
+ for_each_online_cpu(cpu) {
+ if (!idle_task(cpu)->ret_stack)
+ ftrace_graph_init_task(idle_task(cpu));
+ }
do {
ret = alloc_retstack_tasklist(ret_stack_list);
@@ -3328,18 +3376,25 @@ void unregister_ftrace_graph(void)
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
+ /* Make sure we do not use the parent ret_stack */
+ t->ret_stack = NULL;
+
if (ftrace_graph_active) {
- t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+ struct ftrace_ret_stack *ret_stack;
+
+ ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
* sizeof(struct ftrace_ret_stack),
GFP_KERNEL);
- if (!t->ret_stack)
+ if (!ret_stack)
return;
t->curr_ret_stack = -1;
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->ftrace_timestamp = 0;
- } else
- t->ret_stack = NULL;
+ /* make curr_ret_stack visible before we add the ret_stack */
+ smp_wmb();
+ t->ret_stack = ret_stack;
+ }
}
void ftrace_graph_exit_task(struct task_struct *t)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 16b24d49604c..dc4dc70171ce 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -10,6 +10,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
@@ -370,6 +371,9 @@ static inline int test_time_stamp(u64 delta)
/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
+/* Max number of timestamps that can fit on a page */
+#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
+
int ring_buffer_print_page_header(struct trace_seq *s)
{
struct buffer_data_page field;
@@ -423,6 +427,8 @@ struct ring_buffer {
atomic_t record_disabled;
cpumask_var_t cpumask;
+ struct lock_class_key *reader_lock_key;
+
struct mutex mutex;
struct ring_buffer_per_cpu **buffers;
@@ -562,6 +568,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
cpu_buffer->cpu = cpu;
cpu_buffer->buffer = buffer;
spin_lock_init(&cpu_buffer->reader_lock);
+ lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&cpu_buffer->pages);
@@ -632,7 +639,8 @@ static int rb_cpu_notify(struct notifier_block *self,
* when the buffer wraps. If this flag is not set, the buffer will
* drop data when the tail hits the head.
*/
-struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
+struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+ struct lock_class_key *key)
{
struct ring_buffer *buffer;
int bsize;
@@ -655,6 +663,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
buffer->flags = flags;
buffer->clock = trace_clock_local;
+ buffer->reader_lock_key = key;
/* need at least two pages */
if (buffer->pages == 1)
@@ -712,7 +721,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
kfree(buffer);
return NULL;
}
-EXPORT_SYMBOL_GPL(ring_buffer_alloc);
+EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
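The rename implies that callers keep using a ring_buffer_alloc() wrapper which supplies a distinct lockdep class per allocation site; the header presumably provides something along these lines (sketch, not part of this hunk):

/* One static lockdep key per call site, passed through to the real allocator. */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})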
/**
* ring_buffer_free - free a ring buffer.
@@ -1262,6 +1271,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
if (tail < BUF_PAGE_SIZE) {
/* Mark the rest of the page with padding */
event = __rb_page_index(tail_page, tail);
+ kmemcheck_annotate_bitfield(event, bitfield);
rb_event_set_padding(event);
}
@@ -1319,6 +1329,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
return NULL;
event = __rb_page_index(tail_page, tail);
+ kmemcheck_annotate_bitfield(event, bitfield);
rb_update_event(event, type, length);
/* The passed in type is zero for DATA */
@@ -1335,6 +1346,38 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
return event;
}
+static inline int
+rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+ struct ring_buffer_event *event)
+{
+ unsigned long new_index, old_index;
+ struct buffer_page *bpage;
+ unsigned long index;
+ unsigned long addr;
+
+ new_index = rb_event_index(event);
+ old_index = new_index + rb_event_length(event);
+ addr = (unsigned long)event;
+ addr &= PAGE_MASK;
+
+ bpage = cpu_buffer->tail_page;
+
+ if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
+ /*
+ * This is on the tail page. It is possible that
+ * a write could come in and move the tail page
+ * and write to the next page. That is fine
+ * because we just shorten what is on this page.
+ */
+ index = local_cmpxchg(&bpage->write, old_index, new_index);
+ if (index == old_index)
+ return 1;
+ }
+
+ /* could not discard */
+ return 0;
+}
+
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
u64 *ts, u64 *delta)
@@ -1377,16 +1420,23 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
event->array[0] = *delta >> TS_SHIFT;
} else {
cpu_buffer->commit_page->page->time_stamp = *ts;
- event->time_delta = 0;
- event->array[0] = 0;
+ /* try to discard, since we do not need this */
+ if (!rb_try_to_discard(cpu_buffer, event)) {
+ /* nope, just zero it */
+ event->time_delta = 0;
+ event->array[0] = 0;
+ }
}
cpu_buffer->write_stamp = *ts;
/* let the caller know this was the commit */
ret = 1;
} else {
- /* Darn, this is just wasted space */
- event->time_delta = 0;
- event->array[0] = 0;
+ /* Try to discard the event */
+ if (!rb_try_to_discard(cpu_buffer, event)) {
+ /* Darn, this is just wasted space */
+ event->time_delta = 0;
+ event->array[0] = 0;
+ }
ret = 0;
}
@@ -1682,10 +1732,6 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long new_index, old_index;
- struct buffer_page *bpage;
- unsigned long index;
- unsigned long addr;
int cpu;
/* The event is discarded regardless */
@@ -1701,24 +1747,8 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
cpu = smp_processor_id();
cpu_buffer = buffer->buffers[cpu];
- new_index = rb_event_index(event);
- old_index = new_index + rb_event_length(event);
- addr = (unsigned long)event;
- addr &= PAGE_MASK;
-
- bpage = cpu_buffer->tail_page;
-
- if (bpage == (void *)addr && rb_page_write(bpage) == old_index) {
- /*
- * This is on the tail page. It is possible that
- * a write could come in and move the tail page
- * and write to the next page. That is fine
- * because we just shorten what is on this page.
- */
- index = local_cmpxchg(&bpage->write, old_index, new_index);
- if (index == old_index)
- goto out;
- }
+ if (!rb_try_to_discard(cpu_buffer, event))
+ goto out;
/*
* The commit is still visible by the reader, so we
@@ -2253,8 +2283,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
* Check if we are at the end of the buffer.
*/
if (iter->head >= rb_page_size(iter->head_page)) {
- if (RB_WARN_ON(buffer,
- iter->head_page == cpu_buffer->commit_page))
+ /* discarded commits can make the page empty */
+ if (iter->head_page == cpu_buffer->commit_page)
return;
rb_inc_iter(iter);
return;
@@ -2297,12 +2327,10 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
/*
* We repeat when a timestamp is encountered. It is possible
* to get multiple timestamps from an interrupt entering just
- * as one timestamp is about to be written. The max times
- * that this can happen is the number of nested interrupts we
- * can have. Nesting 10 deep of interrupts is clearly
- * an anomaly.
+ * as one timestamp is about to be written, or from discarded
+ * commits. The most that we can have is the number on a single page.
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
+ if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
return NULL;
reader = rb_get_reader_page(cpu_buffer);
@@ -2368,14 +2396,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
again:
/*
- * We repeat when a timestamp is encountered. It is possible
- * to get multiple timestamps from an interrupt entering just
- * as one timestamp is about to be written. The max times
- * that this can happen is the number of nested interrupts we
- * can have. Nesting 10 deep of interrupts is clearly
- * an anomaly.
+ * We repeat when a timestamp is encountered.
+ * We can get multiple timestamps by nested interrupts or also
+ * if filtering is on (discarding commits). Since discarding
+ * commits can be frequent we can get a lot of timestamps.
+ * But we limit them by not adding timestamps if they begin
+ * at the start of a page.
*/
- if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
+ if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
return NULL;
if (rb_per_cpu_empty(cpu_buffer))
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a3a8a87d7e91..c1878bfb2e1e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -344,7 +344,7 @@ static raw_spinlock_t ftrace_max_lock =
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /debugfs/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
*/
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2414,21 +2414,20 @@ static const struct file_operations tracing_iter_fops = {
static const char readme_msg[] =
"tracing mini-HOWTO:\n\n"
- "# mkdir /debug\n"
- "# mount -t debugfs nodev /debug\n\n"
- "# cat /debug/tracing/available_tracers\n"
+ "# mount -t debugfs nodev /sys/kernel/debug\n\n"
+ "# cat /sys/kernel/debug/tracing/available_tracers\n"
"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
- "# cat /debug/tracing/current_tracer\n"
+ "# cat /sys/kernel/debug/tracing/current_tracer\n"
"nop\n"
- "# echo sched_switch > /debug/tracing/current_tracer\n"
- "# cat /debug/tracing/current_tracer\n"
+ "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+ "# cat /sys/kernel/debug/tracing/current_tracer\n"
"sched_switch\n"
- "# cat /debug/tracing/trace_options\n"
+ "# cat /sys/kernel/debug/tracing/trace_options\n"
"noprint-parent nosym-offset nosym-addr noverbose\n"
- "# echo print-parent > /debug/tracing/trace_options\n"
- "# echo 1 > /debug/tracing/tracing_enabled\n"
- "# cat /debug/tracing/trace > /tmp/trace.txt\n"
- "echo 0 > /debug/tracing/tracing_enabled\n"
+ "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+ "# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+ "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
+ "# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
;
static ssize_t
@@ -2826,6 +2825,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);
+ if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ iter->iter_flags |= TRACE_FILE_LAT_FMT;
+
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
mutex_init(&iter->mutex);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6c81f9c21426..aa08be69a1b6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1050,12 +1050,13 @@ static void trace_module_remove_events(struct module *mod)
struct ftrace_event_call *call, *p;
bool found = false;
+ down_write(&trace_event_mutex);
list_for_each_entry_safe(call, p, &ftrace_events, list) {
if (call->mod == mod) {
found = true;
ftrace_event_enable_disable(call, 0);
if (call->event)
- unregister_ftrace_event(call->event);
+ __unregister_ftrace_event(call->event);
debugfs_remove_recursive(call->dir);
list_del(&call->list);
trace_destroy_fields(call);
@@ -1079,6 +1080,7 @@ static void trace_module_remove_events(struct module *mod)
*/
if (found)
tracing_reset_current_online_cpus();
+ up_write(&trace_event_mutex);
}
static int trace_module_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index a7430b16d243..db6e54bdb596 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -478,12 +478,12 @@ enum {
static int is_string_field(const char *type)
{
+ if (strstr(type, "__data_loc") && strstr(type, "char"))
+ return FILTER_DYN_STRING;
+
if (strchr(type, '[') && strstr(type, "char"))
return FILTER_STATIC_STRING;
- if (!strcmp(type, "__str_loc"))
- return FILTER_DYN_STRING;
-
return 0;
}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 10f6ad7d85f6..8b592418d8b2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
if (!current->ret_stack)
return -EBUSY;
+ /*
+ * We must make sure the ret_stack is tested before we read
+ * anything else.
+ */
+ smp_rmb();
+
/* The return trace stack is full */
if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
atomic_inc(&current->trace_overrun);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index c12d95db2f56..7938f3ae93e3 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -14,9 +14,10 @@
/* must be a power of 2 */
#define EVENT_HASHSIZE 128
-static DECLARE_RWSEM(trace_event_mutex);
+DECLARE_RWSEM(trace_event_mutex);
DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
+EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
@@ -99,6 +100,38 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
+/**
+ * trace_seq_vprintf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_vprintf is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ */
+int
+trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+{
+ int len = (PAGE_SIZE - 1) - s->len;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = vsnprintf(s->buffer + s->len, len, fmt, args);
+
+ /* If we can't write it all, don't bother writing anything */
+ if (ret >= len)
+ return 0;
+
+ s->len += ret;
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_vprintf);
+
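Since the helper is now exported, a tracer plugin could layer its own varargs front end on top of it; a minimal sketch with a hypothetical wrapper name:

/* Sketch: forward varargs to the exported trace_seq_vprintf() helper. */
static int my_trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = trace_seq_vprintf(s, fmt, ap);
	va_end(ap);

	return ret;
}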
int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
int len = (PAGE_SIZE - 1) - s->len;
@@ -222,10 +255,9 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
{
unsigned long mask;
const char *str;
+ const char *ret = p->buffer + p->len;
int i;
- trace_seq_init(p);
-
for (i = 0; flag_array[i].name && flags; i++) {
mask = flag_array[i].mask;
@@ -248,16 +280,16 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
trace_seq_putc(p, 0);
- return p->buffer;
+ return ret;
}
+EXPORT_SYMBOL(ftrace_print_flags_seq);
const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
const struct trace_print_flags *symbol_array)
{
int i;
-
- trace_seq_init(p);
+ const char *ret = p->buffer + p->len;
for (i = 0; symbol_array[i].name; i++) {
@@ -273,8 +305,9 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
trace_seq_putc(p, 0);
- return p->buffer;
+ return ret;
}
+EXPORT_SYMBOL(ftrace_print_symbols_seq);
#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
@@ -386,17 +419,20 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
if (ip == ULONG_MAX || !ret)
break;
- if (i && ret)
- ret = trace_seq_puts(s, " <- ");
+ if (ret)
+ ret = trace_seq_puts(s, " => ");
if (!ip) {
if (ret)
ret = trace_seq_puts(s, "??");
+ if (ret)
+ ret = trace_seq_puts(s, "\n");
continue;
}
if (!ret)
break;
if (ret)
ret = seq_print_user_ip(s, mm, ip, sym_flags);
+ ret = trace_seq_puts(s, "\n");
}
if (mm)
@@ -666,6 +702,16 @@ int register_ftrace_event(struct trace_event *event)
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
+/*
+ * Used by module code with the trace_event_mutex held for write.
+ */
+int __unregister_ftrace_event(struct trace_event *event)
+{
+ hlist_del(&event->node);
+ list_del(&event->list);
+ return 0;
+}
+
/**
* unregister_ftrace_event - remove a no longer used event
* @event: the event to remove
@@ -673,8 +719,7 @@ EXPORT_SYMBOL_GPL(register_ftrace_event);
int unregister_ftrace_event(struct trace_event *event)
{
down_write(&trace_event_mutex);
- hlist_del(&event->node);
- list_del(&event->list);
+ __unregister_ftrace_event(event);
up_write(&trace_event_mutex);
return 0;
@@ -972,16 +1017,16 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
trace_assign_type(field, iter->ent);
+ if (!trace_seq_puts(s, "<stack trace>\n"))
+ goto partial;
for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
- if (!field->caller[i])
+ if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
break;
- if (i) {
- if (!trace_seq_puts(s, " <= "))
- goto partial;
+ if (!trace_seq_puts(s, " => "))
+ goto partial;
- if (!seq_print_ip_sym(s, field->caller[i], flags))
- goto partial;
- }
+ if (!seq_print_ip_sym(s, field->caller[i], flags))
+ goto partial;
if (!trace_seq_puts(s, "\n"))
goto partial;
}
@@ -1009,10 +1054,10 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
trace_assign_type(field, iter->ent);
- if (!seq_print_userip_objs(field, s, flags))
+ if (!trace_seq_puts(s, "<user stack trace>\n"))
goto partial;
- if (!trace_seq_putc(s, '\n'))
+ if (!seq_print_userip_objs(field, s, flags))
goto partial;
return TRACE_TYPE_HANDLED;
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h
index ac240e76eb01..d38bec4a9c30 100644
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -27,6 +27,10 @@ extern struct trace_event *ftrace_find_event(int type);
extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
int flags);
+/* used by module unregistering */
+extern int __unregister_ftrace_event(struct trace_event *event);
+extern struct rw_semaphore trace_event_mutex;
+
#define MAX_MEMHEX_BYTES 8
#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 1796f00524e1..2d7aebd71dbd 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -265,7 +265,7 @@ static int t_show(struct seq_file *m, void *v)
seq_printf(m, " Depth Size Location"
" (%d entries)\n"
" ----- ---- --------\n",
- max_stack_trace.nr_entries);
+ max_stack_trace.nr_entries - 1);
if (!stack_tracer_enabled && !max_stack_size)
print_disabled(m);
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index e04b76cc238a..f6693969287d 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -203,7 +203,8 @@ static void start_stack_timer(void *unused)
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = stack_trace_timer_fn;
- hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+ HRTIMER_MODE_REL_PINNED);
}
static void start_stack_timers(void)
diff --git a/kernel/user.c b/kernel/user.c
index 850e0ba41c1e..2c000e7132ac 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -75,21 +75,6 @@ static void uid_hash_remove(struct user_struct *up)
put_user_ns(up->user_ns);
}
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
-{
- struct user_struct *user;
- struct hlist_node *h;
-
- hlist_for_each_entry(user, h, hashent, uidhash_node) {
- if (user->uid == uid) {
- atomic_inc(&user->__count);
- return user;
- }
- }
-
- return NULL;
-}
-
#ifdef CONFIG_USER_SCHED
static void sched_destroy_user(struct user_struct *up)
@@ -119,6 +104,23 @@ static int sched_create_user(struct user_struct *up) { return 0; }
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+ if (user->uid == uid) {
+ /* possibly resurrect an "almost deleted" object */
+ if (atomic_inc_return(&user->__count) == 1)
+ cancel_delayed_work(&user->work);
+ return user;
+ }
+ }
+
+ return NULL;
+}
+
static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);
@@ -283,12 +285,12 @@ int __init uids_sysfs_init(void)
return uids_user_create(&root_user);
}
-/* work function to remove sysfs directory for a user and free up
+/* delayed work function to remove sysfs directory for a user and free up
* corresponding structures.
*/
static void cleanup_user_struct(struct work_struct *w)
{
- struct user_struct *up = container_of(w, struct user_struct, work);
+ struct user_struct *up = container_of(w, struct user_struct, work.work);
unsigned long flags;
int remove_user = 0;
@@ -297,15 +299,12 @@ static void cleanup_user_struct(struct work_struct *w)
*/
uids_mutex_lock();
- local_irq_save(flags);
-
- if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
+ spin_lock_irqsave(&uidhash_lock, flags);
+ if (atomic_read(&up->__count) == 0) {
uid_hash_remove(up);
remove_user = 1;
- spin_unlock_irqrestore(&uidhash_lock, flags);
- } else {
- local_irq_restore(flags);
}
+ spin_unlock_irqrestore(&uidhash_lock, flags);
if (!remove_user)
goto done;
@@ -331,16 +330,28 @@ done:
*/
static void free_user(struct user_struct *up, unsigned long flags)
{
- /* restore back the count */
- atomic_inc(&up->__count);
spin_unlock_irqrestore(&uidhash_lock, flags);
-
- INIT_WORK(&up->work, cleanup_user_struct);
- schedule_work(&up->work);
+ INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
+ schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
}
#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+ struct user_struct *user;
+ struct hlist_node *h;
+
+ hlist_for_each_entry(user, h, hashent, uidhash_node) {
+ if (user->uid == uid) {
+ atomic_inc(&user->__count);
+ return user;
+ }
+ }
+
+ return NULL;
+}
+
int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
diff --git a/kernel/wait.c b/kernel/wait.c
index 42a2dbc181c8..ea7c3b4275cf 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -154,7 +154,7 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
if (!list_empty(&wait->task_list))
list_del_init(&wait->task_list);
else if (waitqueue_active(q))
- __wake_up_common(q, mode, 1, 0, key);
+ __wake_up_locked_key(q, mode, key);
spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);