Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 1
-rw-r--r--  kernel/auditsc.c | 2
-rw-r--r--  kernel/context_tracking.c | 83
-rw-r--r--  kernel/cpu.c | 13
-rw-r--r--  kernel/events/hw_breakpoint.c | 12
-rw-r--r--  kernel/events/uprobes.c | 43
-rw-r--r--  kernel/exit.c | 4
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/futex.c | 59
-rw-r--r--  kernel/irq/chip.c | 1
-rw-r--r--  kernel/irq/irqdomain.c | 4
-rw-r--r--  kernel/irq/manage.c | 41
-rw-r--r--  kernel/irq/resend.c | 8
-rw-r--r--  kernel/ksysfs.c | 23
-rw-r--r--  kernel/lockdep_proc.c | 2
-rw-r--r--  kernel/modsign_pubkey.c | 4
-rw-r--r--  kernel/module_signing.c | 14
-rw-r--r--  kernel/posix-cpu-timers.c | 24
-rw-r--r--  kernel/power/main.c | 2
-rw-r--r--  kernel/power/qos.c | 65
-rw-r--r--  kernel/power/swap.c | 2
-rw-r--r--  kernel/printk.c | 12
-rw-r--r--  kernel/rcu.h | 2
-rw-r--r--  kernel/rcupdate.c | 3
-rw-r--r--  kernel/rcutiny.c | 2
-rw-r--r--  kernel/rcutiny_plugin.h | 5
-rw-r--r--  kernel/rcutorture.c | 54
-rw-r--r--  kernel/rcutree.c | 347
-rw-r--r--  kernel/rcutree.h | 67
-rw-r--r--  kernel/rcutree_plugin.h | 415
-rw-r--r--  kernel/rcutree_trace.c | 330
-rw-r--r--  kernel/sched/core.c | 34
-rw-r--r--  kernel/sched/cputime.c | 131
-rw-r--r--  kernel/sched/debug.c | 36
-rw-r--r--  kernel/sched/fair.c | 914
-rw-r--r--  kernel/sched/features.h | 5
-rw-r--r--  kernel/sched/sched.h | 60
-rw-r--r--  kernel/softirq.c | 6
-rw-r--r--  kernel/srcu.c | 16
-rw-r--r--  kernel/sys.c | 6
-rw-r--r--  kernel/sysctl.c | 4
-rw-r--r--  kernel/time/jiffies.c | 8
-rw-r--r--  kernel/time/tick-common.c | 8
-rw-r--r--  kernel/time/tick-internal.h | 1
-rw-r--r--  kernel/time/tick-sched.c | 137
-rw-r--r--  kernel/time/timekeeping.c | 14
-rw-r--r--  kernel/trace/Kconfig | 1
-rw-r--r--  kernel/trace/ftrace.c | 8
-rw-r--r--  kernel/trace/ring_buffer.c | 65
-rw-r--r--  kernel/trace/trace.c | 411
-rw-r--r--  kernel/trace/trace.h | 18
-rw-r--r--  kernel/trace/trace_branch.c | 4
-rw-r--r--  kernel/trace/trace_events.c | 51
-rw-r--r--  kernel/trace/trace_events_filter.c | 4
-rw-r--r--  kernel/trace/trace_functions.c | 5
-rw-r--r--  kernel/trace/trace_functions_graph.c | 6
-rw-r--r--  kernel/trace/trace_irqsoff.c | 14
-rw-r--r--  kernel/trace/trace_kprobe.c | 10
-rw-r--r--  kernel/trace/trace_output.c | 78
-rw-r--r--  kernel/trace/trace_probe.c | 14
-rw-r--r--  kernel/trace/trace_sched_switch.c | 4
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 10
-rw-r--r--  kernel/trace/trace_selftest.c | 13
-rw-r--r--  kernel/trace/trace_syscalls.c | 61
-rw-r--r--  kernel/trace/trace_uprobe.c | 4
-rw-r--r--  kernel/watchdog.c | 7
-rw-r--r--  kernel/workqueue.c | 22
67 files changed, 2700 insertions, 1138 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 86e3285ae7e5..ac0d533eb7de 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -110,6 +110,7 @@ obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
obj-$(CONFIG_PADATA) += padata.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
$(obj)/configs.o: $(obj)/config_data.h
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 2f186ed80c40..fc7376bf86ea 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1159,7 +1159,7 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
cred = current_cred();
spin_lock_irq(&tsk->sighand->siglock);
- if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
+ if (tsk->signal && tsk->signal->tty)
tty = tsk->signal->tty->name;
else
tty = "(none)";
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
new file mode 100644
index 000000000000..e0e07fd55508
--- /dev/null
+++ b/kernel/context_tracking.c
@@ -0,0 +1,83 @@
+#include <linux/context_tracking.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+
+struct context_tracking {
+ /*
+ * When active is false, hooks are not set to
+ * minimize overhead: TIF flags are cleared
+ * and calls to user_enter/exit are ignored. This
+ * may be further optimized using static keys.
+ */
+ bool active;
+ enum {
+ IN_KERNEL = 0,
+ IN_USER,
+ } state;
+};
+
+static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+ .active = true,
+#endif
+};
+
+void user_enter(void)
+{
+ unsigned long flags;
+
+ /*
+ * Some contexts may involve an exception occuring in an irq,
+ * leading to that nesting:
+ * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+ * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+ * helpers are enough to protect RCU uses inside the exception. So
+ * just return immediately if we detect we are in an IRQ.
+ */
+ if (in_interrupt())
+ return;
+
+ WARN_ON_ONCE(!current->mm);
+
+ local_irq_save(flags);
+ if (__this_cpu_read(context_tracking.active) &&
+ __this_cpu_read(context_tracking.state) != IN_USER) {
+ __this_cpu_write(context_tracking.state, IN_USER);
+ rcu_user_enter();
+ }
+ local_irq_restore(flags);
+}
+
+void user_exit(void)
+{
+ unsigned long flags;
+
+ /*
+ * Some contexts may involve an exception occuring in an irq,
+ * leading to that nesting:
+ * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
+ * This would mess up the dyntick_nesting count though. And rcu_irq_*()
+ * helpers are enough to protect RCU uses inside the exception. So
+ * just return immediately if we detect we are in an IRQ.
+ */
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+ if (__this_cpu_read(context_tracking.state) == IN_USER) {
+ __this_cpu_write(context_tracking.state, IN_KERNEL);
+ rcu_user_exit();
+ }
+ local_irq_restore(flags);
+}
+
+void context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if (__this_cpu_read(context_tracking.active)) {
+ clear_tsk_thread_flag(prev, TIF_NOHZ);
+ set_tsk_thread_flag(next, TIF_NOHZ);
+ }
+}
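
[Editor's note] The new kernel/context_tracking.c above keeps a tiny per-CPU IN_KERNEL/IN_USER state machine and only calls into RCU on real transitions. The following standalone C sketch is a userspace model of just those transition rules (hypothetical names; single-threaded, no per-CPU data, no interrupt disabling, no real RCU hooks) and is not the kernel implementation:

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of the per-CPU state machine in kernel/context_tracking.c. */
enum ct_state { CT_IN_KERNEL = 0, CT_IN_USER };

struct context_tracking_model {
	bool active;            /* tracking enabled on this "CPU"? */
	enum ct_state state;
};

static struct context_tracking_model ct = { .active = true, .state = CT_IN_KERNEL };

/* Mirrors user_enter(): transition only when tracking is active. */
static void model_user_enter(void)
{
	if (ct.active && ct.state != CT_IN_USER) {
		ct.state = CT_IN_USER;
		printf("rcu_user_enter(): CPU enters RCU extended quiescent state\n");
	}
}

/* Mirrors user_exit(): leave the user/extended quiescent state. */
static void model_user_exit(void)
{
	if (ct.state == CT_IN_USER) {
		ct.state = CT_IN_KERNEL;
		printf("rcu_user_exit(): CPU is back in kernel mode for RCU\n");
	}
}

int main(void)
{
	model_user_exit();   /* syscall or exception entry from user space */
	model_user_enter();  /* return to user space */
	model_user_exit();   /* next kernel entry */
	return 0;
}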
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 42bd331ee0ab..3046a503242c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -348,11 +348,13 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct task_struct *idle;
- if (cpu_online(cpu) || !cpu_present(cpu))
- return -EINVAL;
-
cpu_hotplug_begin();
+ if (cpu_online(cpu) || !cpu_present(cpu)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
idle = idle_thread_get(cpu);
if (IS_ERR(idle)) {
ret = PTR_ERR(idle);
@@ -601,6 +603,11 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
static int __init cpu_hotplug_pm_sync_init(void)
{
+ /*
+ * cpu_hotplug_pm_callback has higher priority than x86
+ * bsp_pm_callback which depends on cpu_hotplug_pm_callback
+ * to disable cpu hotplug to avoid cpu hotplug race.
+ */
pm_notifier(cpu_hotplug_pm_callback, 0);
return 0;
}
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 9a7b487c6fe2..fe8a916507ed 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -111,14 +111,16 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
* Count the number of breakpoints of the same type and same task.
* The given event must be not on the list.
*/
-static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
+static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
struct task_struct *tsk = bp->hw.bp_target;
struct perf_event *iter;
int count = 0;
list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
- if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
+ if (iter->hw.bp_target == tsk &&
+ find_slot_idx(iter) == type &&
+ cpu == iter->cpu)
count += hw_breakpoint_weight(iter);
}
@@ -141,7 +143,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
if (!tsk)
slots->pinned += max_task_bp_pinned(cpu, type);
else
- slots->pinned += task_bp_pinned(bp, type);
+ slots->pinned += task_bp_pinned(cpu, bp, type);
slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
return;
@@ -154,7 +156,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
if (!tsk)
nr += max_task_bp_pinned(cpu, type);
else
- nr += task_bp_pinned(bp, type);
+ nr += task_bp_pinned(cpu, bp, type);
if (nr > slots->pinned)
slots->pinned = nr;
@@ -188,7 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
int old_idx = 0;
int idx = 0;
- old_count = task_bp_pinned(bp, type);
+ old_count = task_bp_pinned(cpu, bp, type);
old_idx = old_count - 1;
idx = old_idx + weight;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 5cc4e7e42e68..dea7acfbb071 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -33,6 +33,7 @@
#include <linux/ptrace.h> /* user_enable_single_step */
#include <linux/kdebug.h> /* notifier mechanism */
#include "../../mm/internal.h" /* munlock_vma_page */
+#include <linux/percpu-rwsem.h>
#include <linux/uprobes.h>
@@ -71,6 +72,8 @@ static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
+static struct percpu_rw_semaphore dup_mmap_sem;
+
/*
* uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
* events active at this time. Probably a fine grained per inode count is
@@ -766,10 +769,13 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
struct map_info *info;
int err = 0;
+ percpu_down_write(&dup_mmap_sem);
info = build_map_info(uprobe->inode->i_mapping,
uprobe->offset, is_register);
- if (IS_ERR(info))
- return PTR_ERR(info);
+ if (IS_ERR(info)) {
+ err = PTR_ERR(info);
+ goto out;
+ }
while (info) {
struct mm_struct *mm = info->mm;
@@ -799,7 +805,8 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
mmput(mm);
info = free_map_info(info);
}
-
+ out:
+ percpu_up_write(&dup_mmap_sem);
return err;
}
@@ -1131,6 +1138,16 @@ void uprobe_clear_state(struct mm_struct *mm)
kfree(area);
}
+void uprobe_start_dup_mmap(void)
+{
+ percpu_down_read(&dup_mmap_sem);
+}
+
+void uprobe_end_dup_mmap(void)
+{
+ percpu_up_read(&dup_mmap_sem);
+}
+
void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
newmm->uprobes_state.xol_area = NULL;
@@ -1199,6 +1216,11 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot
vaddr = kmap_atomic(area->page);
memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
kunmap_atomic(vaddr);
+ /*
+ * We probably need flush_icache_user_range() but it needs vma.
+ * This should work on supported architectures too.
+ */
+ flush_dcache_page(area->page);
return current->utask->xol_vaddr;
}
@@ -1430,16 +1452,6 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
return uprobe;
}
-void __weak arch_uprobe_enable_step(struct arch_uprobe *arch)
-{
- user_enable_single_step(current);
-}
-
-void __weak arch_uprobe_disable_step(struct arch_uprobe *arch)
-{
- user_disable_single_step(current);
-}
-
/*
* Run handler and ask thread to singlestep.
* Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
@@ -1493,7 +1505,6 @@ static void handle_swbp(struct pt_regs *regs)
goto out;
if (!pre_ssout(uprobe, regs, bp_vaddr)) {
- arch_uprobe_enable_step(&uprobe->arch);
utask->active_uprobe = uprobe;
utask->state = UTASK_SSTEP;
return;
@@ -1525,7 +1536,6 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
else
WARN_ON_ONCE(1);
- arch_uprobe_disable_step(&uprobe->arch);
put_uprobe(uprobe);
utask->active_uprobe = NULL;
utask->state = UTASK_RUNNING;
@@ -1604,6 +1614,9 @@ static int __init init_uprobes(void)
mutex_init(&uprobes_mmap_mutex[i]);
}
+ if (percpu_init_rwsem(&dup_mmap_sem))
+ return -ENOMEM;
+
return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);
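
[Editor's note] The dup_mmap_sem added above is a reader/writer exclusion: register_for_each_vma() takes the per-CPU rwsem for writing so no fork() can duplicate an mm while probes are being (un)registered, while dup_mmap() brackets itself with uprobe_start_dup_mmap()/uprobe_end_dup_mmap() as a reader (see the kernel/fork.c hunk below). A minimal userspace model with a pthread rwlock (hypothetical names; percpu_rw_semaphore itself is kernel-only) might look like this; build with -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t dup_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Model of uprobe_start_dup_mmap()/uprobe_end_dup_mmap() around dup_mmap(). */
static void *forker(void *arg)
{
	pthread_rwlock_rdlock(&dup_mmap_sem);   /* many forks may run concurrently */
	printf("dup_mmap: copying VMAs without racing a probe registration\n");
	pthread_rwlock_unlock(&dup_mmap_sem);
	return NULL;
}

/* Model of register_for_each_vma(): the writer excludes every forker. */
static void *registrar(void *arg)
{
	pthread_rwlock_wrlock(&dup_mmap_sem);
	printf("register_for_each_vma: no dup_mmap() can run here\n");
	pthread_rwlock_unlock(&dup_mmap_sem);
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	pthread_create(&t[0], NULL, forker, NULL);
	pthread_create(&t[1], NULL, registrar, NULL);
	pthread_create(&t[2], NULL, forker, NULL);
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}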
diff --git a/kernel/exit.c b/kernel/exit.c
index 346616c0092c..618f7ee56003 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1186,11 +1186,11 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
* as other threads in the parent group can be right
* here reaping other children at the same time.
*
- * We use thread_group_times() to get times for the thread
+ * We use thread_group_cputime_adjusted() to get times for the thread
* group, which consolidates times for all threads in the
* group including the group leader.
*/
- thread_group_times(p, &tgutime, &tgstime);
+ thread_group_cputime_adjusted(p, &tgutime, &tgstime);
spin_lock_irq(&p->real_parent->sighand->siglock);
psig = p->real_parent->signal;
sig = p->signal;
diff --git a/kernel/fork.c b/kernel/fork.c
index 8b20ab7d3aa2..850dde1e0c84 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -352,6 +352,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
unsigned long charge;
struct mempolicy *pol;
+ uprobe_start_dup_mmap();
down_write(&oldmm->mmap_sem);
flush_cache_dup_mm(oldmm);
uprobe_dup_mmap(oldmm, mm);
@@ -469,6 +470,7 @@ out:
up_write(&mm->mmap_sem);
flush_tlb_mm(oldmm);
up_write(&oldmm->mmap_sem);
+ uprobe_end_dup_mmap();
return retval;
fail_nomem_anon_vma_fork:
mpol_put(pol);
@@ -1222,7 +1224,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->utime = p->stime = p->gtime = 0;
p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
- p->prev_utime = p->prev_stime = 0;
+ p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#if defined(SPLIT_RSS_COUNTING)
memset(&p->rss_stat, 0, sizeof(p->rss_stat));
diff --git a/kernel/futex.c b/kernel/futex.c
index 3717e7b306e0..19eb089ca003 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
- int lock_taken, ret, ownerdied = 0;
+ int lock_taken, ret, force_take = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
@@ -755,17 +755,15 @@ retry:
newval = curval | FUTEX_WAITERS;
/*
- * There are two cases, where a futex might have no owner (the
- * owner TID is 0): OWNER_DIED. We take over the futex in this
- * case. We also do an unconditional take over, when the owner
- * of the futex died.
- *
- * This is safe as we are protected by the hash bucket lock !
+ * Should we force take the futex? See below.
*/
- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
- /* Keep the OWNER_DIED bit */
+ if (unlikely(force_take)) {
+ /*
+ * Keep the OWNER_DIED and the WAITERS bit and set the
+ * new TID value.
+ */
newval = (curval & ~FUTEX_TID_MASK) | vpid;
- ownerdied = 0;
+ force_take = 0;
lock_taken = 1;
}
@@ -775,7 +773,7 @@ retry:
goto retry;
/*
- * We took the lock due to owner died take over.
+ * We took the lock due to forced take over.
*/
if (unlikely(lock_taken))
return 1;
@@ -790,20 +788,25 @@ retry:
switch (ret) {
case -ESRCH:
/*
- * No owner found for this futex. Check if the
- * OWNER_DIED bit is set to figure out whether
- * this is a robust futex or not.
+ * We failed to find an owner for this
+ * futex. So we have no pi_state to block
+ * on. This can happen in two cases:
+ *
+ * 1) The owner died
+ * 2) A stale FUTEX_WAITERS bit
+ *
+ * Re-read the futex value.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
- * We simply start over in case of a robust
- * futex. The code above will take the futex
- * and return happy.
+ * If the owner died or we have a stale
+ * WAITERS bit the owner TID in the user space
+ * futex is 0.
*/
- if (curval & FUTEX_OWNER_DIED) {
- ownerdied = 1;
+ if (!(curval & FUTEX_TID_MASK)) {
+ force_take = 1;
goto retry;
}
default:
@@ -840,6 +843,9 @@ static void wake_futex(struct futex_q *q)
{
struct task_struct *p = q->task;
+ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
+ return;
+
/*
* We set q->lock_ptr = NULL _before_ we wake up the task. If
* a non-futex wake up happens on another CPU then the task
@@ -1075,6 +1081,10 @@ retry_private:
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key1)) {
+ if (this->pi_state || this->rt_waiter) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
wake_futex(this);
if (++ret >= nr_wake)
break;
@@ -1087,6 +1097,10 @@ retry_private:
op_ret = 0;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key2)) {
+ if (this->pi_state || this->rt_waiter) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
wake_futex(this);
if (++op_ret >= nr_wake2)
break;
@@ -1095,6 +1109,7 @@ retry_private:
ret += op_ret;
}
+out_unlock:
double_unlock_hb(hb1, hb2);
out_put_keys:
put_futex_key(&key2);
@@ -1384,9 +1399,13 @@ retry_private:
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
+ *
+ * We should never be requeueing a futex_q with a pi_state,
+ * which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
- (!requeue_pi && this->rt_waiter)) {
+ (!requeue_pi && this->rt_waiter) ||
+ this->pi_state) {
ret = -EINVAL;
break;
}
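
[Editor's note] The force_take path above takes over a futex whose user-space word carries an owner TID of 0 (dead owner or stale WAITERS bit), installing the new TID while preserving the OWNER_DIED/WAITERS bits. The real code does this with cmpxchg_futex_value_locked() under the hash-bucket lock; the sketch below is only a lock-free userspace illustration of the TID==0 takeover rule, using the futex word bit layout from <linux/futex.h>:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Bit layout of the user-space futex word (as in <linux/futex.h>). */
#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u
#define FUTEX_TID_MASK   0x3fffffffu

/*
 * If no owner TID is recorded, install my_tid while keeping the state bits.
 * Returns 1 if we took the lock, 0 if a live owner is still recorded.
 */
static int force_take_if_unowned(_Atomic uint32_t *uaddr, uint32_t my_tid)
{
	uint32_t curval = atomic_load(uaddr);

	for (;;) {
		if (curval & FUTEX_TID_MASK)
			return 0;                       /* somebody still owns it */

		uint32_t newval = (curval & ~FUTEX_TID_MASK) | my_tid;
		if (atomic_compare_exchange_weak(uaddr, &curval, newval))
			return 1;                       /* curval reloaded on failure */
	}
}

int main(void)
{
	_Atomic uint32_t futex_word = FUTEX_OWNER_DIED | FUTEX_WAITERS;

	if (force_take_if_unowned(&futex_word, 1234))
		printf("took futex, word is now 0x%08x\n",
		       (unsigned)atomic_load(&futex_word));
	return 0;
}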
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 57d86d07221e..3aca9f29d30e 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -272,6 +272,7 @@ void handle_nested_irq(unsigned int irq)
raw_spin_lock_irq(&desc->lock);
+ desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
kstat_incr_irqs_this_cpu(irq, desc);
action = desc->action;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 4e69e24d3d7d..96f3a1d9c379 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -177,8 +177,8 @@ struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
irq_base = irq_alloc_descs(first_irq, first_irq, size,
of_node_to_nid(of_node));
if (irq_base < 0) {
- WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
- first_irq);
+ pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ first_irq);
irq_base = first_irq;
}
} else
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c69326aa773..35c70c9e24d8 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -616,6 +616,22 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
return ret;
}
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq)
+{
+ unsigned long flags;
+ struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+
+ if (!desc)
+ return -EINVAL;
+
+ desc->parent_irq = parent_irq;
+
+ irq_put_desc_unlock(desc, flags);
+ return 0;
+}
+#endif
+
/*
* Default primary interrupt handler for threaded interrupts. Is
* assigned as primary handler when request_threaded_irq is called
@@ -716,6 +732,7 @@ static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
cpumask_var_t mask;
+ bool valid = true;
if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
return;
@@ -730,10 +747,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
}
raw_spin_lock_irq(&desc->lock);
- cpumask_copy(mask, desc->irq_data.affinity);
+ /*
+ * This code is triggered unconditionally. Check the affinity
+ * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+ */
+ if (desc->irq_data.affinity)
+ cpumask_copy(mask, desc->irq_data.affinity);
+ else
+ valid = false;
raw_spin_unlock_irq(&desc->lock);
- set_cpus_allowed_ptr(current, mask);
+ if (valid)
+ set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
}
#else
@@ -833,6 +858,8 @@ static int irq_thread(void *data)
init_task_work(&on_exit_work, irq_thread_dtor);
task_work_add(current, &on_exit_work, false);
+ irq_thread_check_affinity(desc, action);
+
while (!irq_wait_for_interrupt(action)) {
irqreturn_t action_ret;
@@ -936,6 +963,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
get_task_struct(t);
new->thread = t;
+ /*
+ * Tell the thread to set its affinity. This is
+ * important for shared interrupt handlers as we do
+ * not invoke setup_affinity() for the secondary
+ * handlers as everything is already set up. Even for
+ * interrupts marked with IRQF_NO_BALANCE this is
+ * correct as we want the thread to move to the cpu(s)
+ * on which the requesting code placed the interrupt.
+ */
+ set_bit(IRQTF_AFFINITY, &new->thread_flags);
}
if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 6454db7b6a4d..9065107f083e 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -74,6 +74,14 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
if (!desc->irq_data.chip->irq_retrigger ||
!desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
#ifdef CONFIG_HARDIRQS_SW_RESEND
+ /*
+ * If the interrupt has a parent irq and runs
+ * in the thread context of the parent irq,
+ * retrigger the parent.
+ */
+ if (desc->parent_irq &&
+ irq_settings_is_nested_thread(desc))
+ irq = desc->parent_irq;
/* Set it pending and activate the softirq: */
set_bit(irq, irqs_resend);
tasklet_schedule(&resend_tasklet);
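
[Editor's note] The two hunks above work together: irq_set_parent() records a demultiplexing parent for a nested-threaded interrupt, and check_irq_resend() then retriggers the parent instead of the child when a software resend is needed. A tiny standalone C model of that redirection decision (hypothetical struct, fields loosely mirroring irq_desc):

#include <stdbool.h>
#include <stdio.h>

struct fake_irq_desc {
	int irq;
	int parent_irq;          /* what irq_set_parent() records in the real code */
	bool nested_thread;      /* irq_settings_is_nested_thread() in the real code */
};

/* Pick which interrupt line actually gets marked pending for SW resend. */
static int irq_to_resend(const struct fake_irq_desc *desc)
{
	if (desc->parent_irq && desc->nested_thread)
		return desc->parent_irq;    /* retrigger the demultiplexing parent */
	return desc->irq;
}

int main(void)
{
	struct fake_irq_desc child = { .irq = 42, .parent_irq = 7, .nested_thread = true };
	struct fake_irq_desc plain = { .irq = 13, .parent_irq = 0, .nested_thread = false };

	printf("nested child IRQ 42 resends via IRQ %d\n", irq_to_resend(&child));
	printf("ordinary IRQ 13 resends via IRQ %d\n", irq_to_resend(&plain));
	return 0;
}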
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 4e316e1acf58..6ada93c23a9a 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -26,7 +26,6 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
-#if defined(CONFIG_HOTPLUG)
/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
@@ -54,7 +53,7 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
return count;
}
KERNEL_ATTR_RW(uevent_helper);
-#endif
+
#ifdef CONFIG_PROFILING
static ssize_t profiling_show(struct kobject *kobj,
@@ -141,6 +140,23 @@ static ssize_t fscaps_show(struct kobject *kobj,
}
KERNEL_ATTR_RO(fscaps);
+int rcu_expedited;
+static ssize_t rcu_expedited_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", rcu_expedited);
+}
+static ssize_t rcu_expedited_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (kstrtoint(buf, 0, &rcu_expedited))
+ return -EINVAL;
+
+ return count;
+}
+KERNEL_ATTR_RW(rcu_expedited);
+
/*
* Make /sys/kernel/notes give the raw contents of our kernel .notes section.
*/
@@ -169,10 +185,8 @@ EXPORT_SYMBOL_GPL(kernel_kobj);
static struct attribute * kernel_attrs[] = {
&fscaps_attr.attr,
-#if defined(CONFIG_HOTPLUG)
&uevent_seqnum_attr.attr,
&uevent_helper_attr.attr,
-#endif
#ifdef CONFIG_PROFILING
&profiling_attr.attr,
#endif
@@ -182,6 +196,7 @@ static struct attribute * kernel_attrs[] = {
&kexec_crash_size_attr.attr,
&vmcoreinfo_attr.attr,
#endif
+ &rcu_expedited_attr.attr,
NULL
};
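
[Editor's note] The new attribute above appears as /sys/kernel/rcu_expedited; setting it makes synchronize_rcu() and friends use the expedited variants (see the kernel/rcutiny_plugin.h and kernel/rcutree.c hunks below). A minimal userspace C snippet that reads and toggles the knob, assuming the sysfs path created by this patch and sufficient privileges:

#include <stdio.h>

/* Path created by the kernel_attrs[] change above. */
#define RCU_EXPEDITED_PATH "/sys/kernel/rcu_expedited"

int main(void)
{
	FILE *f = fopen(RCU_EXPEDITED_PATH, "r+");
	int val;

	if (!f) {
		perror(RCU_EXPEDITED_PATH);   /* older kernel or no permission */
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("rcu_expedited is currently %d\n", val);

	rewind(f);
	fprintf(f, "1\n");                    /* request expedited grace periods */
	fclose(f);
	return 0;
}

The same value can also be set at boot through the rcupdate.rcu_expedited= parameter added by the module_param() in the kernel/rcupdate.c hunk below.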
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 91c32a0b612c..b2c71c5873e4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
static void print_name(struct seq_file *m, struct lock_class *class)
{
- char str[128];
+ char str[KSYM_NAME_LEN];
const char *name = class->name;
if (!name) {
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 4646eb2c3820..767e559dfb10 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -21,10 +21,10 @@ struct key *modsign_keyring;
extern __initdata const u8 modsign_certificate_list[];
extern __initdata const u8 modsign_certificate_list_end[];
asm(".section .init.data,\"aw\"\n"
- "modsign_certificate_list:\n"
+ SYMBOL_PREFIX "modsign_certificate_list:\n"
".incbin \"signing_key.x509\"\n"
".incbin \"extra_certificates\"\n"
- "modsign_certificate_list_end:"
+ SYMBOL_PREFIX "modsign_certificate_list_end:"
);
/*
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index ea1b1df5dbb0..f2970bddc5ea 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -27,13 +27,13 @@
* - Information block
*/
struct module_signature {
- enum pkey_algo algo : 8; /* Public-key crypto algorithm */
- enum pkey_hash_algo hash : 8; /* Digest algorithm */
- enum pkey_id_type id_type : 8; /* Key identifier type */
- u8 signer_len; /* Length of signer's name */
- u8 key_id_len; /* Length of key identifier */
- u8 __pad[3];
- __be32 sig_len; /* Length of signature data */
+ u8 algo; /* Public-key crypto algorithm [enum pkey_algo] */
+ u8 hash; /* Digest algorithm [enum pkey_hash_algo] */
+ u8 id_type; /* Key identifier type [enum pkey_id_type] */
+ u8 signer_len; /* Length of signer's name */
+ u8 key_id_len; /* Length of key identifier */
+ u8 __pad[3];
+ __be32 sig_len; /* Length of signature data */
};
/*
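
[Editor's note] With the change above, struct module_signature becomes a fixed-layout trailer (byte-sized fields plus a big-endian length), so it can be parsed without depending on how the compiler lays out enum bitfields. The standalone C sketch below parses such a trailer from the end of a memory buffer; it is a hypothetical illustration only (the real signed-module format also appends a magic string after this struct, which is omitted here):

#include <arpa/inet.h>   /* ntohl()/htonl() for the __be32 length */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fixed-layout trailer matching the hunk above. */
struct module_signature {
	uint8_t  algo;        /* enum pkey_algo */
	uint8_t  hash;        /* enum pkey_hash_algo */
	uint8_t  id_type;     /* enum pkey_id_type */
	uint8_t  signer_len;
	uint8_t  key_id_len;
	uint8_t  pad[3];
	uint32_t sig_len;     /* big-endian on disk */
};

/* Hypothetical parser: return the signature length, or -1 if too small. */
static long parse_modsig(const uint8_t *buf, size_t len)
{
	struct module_signature ms;

	if (len < sizeof(ms))
		return -1;
	memcpy(&ms, buf + len - sizeof(ms), sizeof(ms));
	return (long)ntohl(ms.sig_len);
}

int main(void)
{
	uint8_t fake[64] = { 0 };
	struct module_signature ms = { .sig_len = htonl(256) };

	memcpy(fake + sizeof(fake) - sizeof(ms), &ms, sizeof(ms));
	printf("sig_len = %ld bytes\n", parse_modsig(fake, sizeof(fake)));
	return 0;
}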
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 125cb67daa21..d73840271dce 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -217,30 +217,6 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
return 0;
}
-void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
-{
- struct signal_struct *sig = tsk->signal;
- struct task_struct *t;
-
- times->utime = sig->utime;
- times->stime = sig->stime;
- times->sum_exec_runtime = sig->sum_sched_runtime;
-
- rcu_read_lock();
- /* make sure we can trust tsk->thread_group list */
- if (!likely(pid_alive(tsk)))
- goto out;
-
- t = tsk;
- do {
- times->utime += t->utime;
- times->stime += t->stime;
- times->sum_exec_runtime += task_sched_runtime(t);
- } while_each_thread(tsk, t);
-out:
- rcu_read_unlock();
-}
-
static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
if (b->utime > a->utime)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f458238109cc..1c16f9167de1 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -59,7 +59,7 @@ static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
{
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 846bd42c7ed1..9322ff7eaad6 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -213,6 +213,69 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
}
/**
+ * pm_qos_flags_remove_req - Remove device PM QoS flags request.
+ * @pqf: Device PM QoS flags set to remove the request from.
+ * @req: Request to remove from the set.
+ */
+static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
+ struct pm_qos_flags_request *req)
+{
+ s32 val = 0;
+
+ list_del(&req->node);
+ list_for_each_entry(req, &pqf->list, node)
+ val |= req->flags;
+
+ pqf->effective_flags = val;
+}
+
+/**
+ * pm_qos_update_flags - Update a set of PM QoS flags.
+ * @pqf: Set of flags to update.
+ * @req: Request to add to the set, to modify, or to remove from the set.
+ * @action: Action to take on the set.
+ * @val: Value of the request to add or modify.
+ *
+ * Update the given set of PM QoS flags and call notifiers if the aggregate
+ * value has changed. Returns 1 if the aggregate constraint value has changed,
+ * 0 otherwise.
+ */
+bool pm_qos_update_flags(struct pm_qos_flags *pqf,
+ struct pm_qos_flags_request *req,
+ enum pm_qos_req_action action, s32 val)
+{
+ unsigned long irqflags;
+ s32 prev_value, curr_value;
+
+ spin_lock_irqsave(&pm_qos_lock, irqflags);
+
+ prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
+
+ switch (action) {
+ case PM_QOS_REMOVE_REQ:
+ pm_qos_flags_remove_req(pqf, req);
+ break;
+ case PM_QOS_UPDATE_REQ:
+ pm_qos_flags_remove_req(pqf, req);
+ case PM_QOS_ADD_REQ:
+ req->flags = val;
+ INIT_LIST_HEAD(&req->node);
+ list_add_tail(&req->node, &pqf->list);
+ pqf->effective_flags |= val;
+ break;
+ default:
+ /* no action */
+ ;
+ }
+
+ curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
+
+ spin_unlock_irqrestore(&pm_qos_lock, irqflags);
+
+ return prev_value != curr_value;
+}
+
+/**
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
@@ -500,7 +563,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
} else {
ascii_value[count] = '\0';
}
- ret = strict_strtoul(ascii_value, 16, &ulval);
+ ret = kstrtoul(ascii_value, 16, &ulval);
if (ret) {
pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
return -EINVAL;
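
[Editor's note] pm_qos_update_flags() above keeps effective_flags as the bitwise OR of every request on the list and reports whether that aggregate changed so notifiers can be run. A standalone C model of just the aggregation (hypothetical list node; no locking, no notifiers):

#include <stdbool.h>
#include <stdio.h>

/* Minimal model of a PM QoS flags set: effective value is the OR of all requests. */
struct flags_req {
	unsigned int flags;
	struct flags_req *next;
};

struct flags_set {
	struct flags_req *head;
	unsigned int effective_flags;
};

/* Recompute the aggregate; returns true if it changed (mirrors the bool return). */
static bool flags_set_recompute(struct flags_set *set)
{
	unsigned int prev = set->effective_flags, val = 0;

	for (struct flags_req *r = set->head; r; r = r->next)
		val |= r->flags;
	set->effective_flags = val;
	return prev != val;
}

int main(void)
{
	struct flags_req a = { .flags = 0x1 }, b = { .flags = 0x4, .next = &a };
	struct flags_set set = { .head = &b };

	printf("changed=%d effective=0x%x\n",
	       flags_set_recompute(&set), set.effective_flags);

	b.flags = 0x1;                        /* like PM_QOS_UPDATE_REQ */
	printf("changed=%d effective=0x%x\n",
	       flags_set_recompute(&set), set.effective_flags);
	return 0;
}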
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 3c9d764eb0d8..7c33ed200410 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -126,7 +126,7 @@ static int swsusp_extents_insert(unsigned long swap_offset)
/* Figure out where to put the new node */
while (*new) {
- ext = container_of(*new, struct swsusp_extent, node);
+ ext = rb_entry(*new, struct swsusp_extent, node);
parent = *new;
if (swap_offset < ext->start) {
/* Try to merge */
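
[Editor's note] The rb_entry() switch above is cosmetic: rb_entry() is container_of() specialized for rbtree nodes, mapping a pointer to an embedded struct rb_node back to the structure that contains it. A userspace illustration of the underlying pointer arithmetic (a reimplementation for demonstration, not the kernel macro itself):

#include <stddef.h>
#include <stdio.h>

/* Userspace copy of the classic container_of() pointer arithmetic. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rb_node_stub { int dummy; };

struct swsusp_extent_stub {
	struct rb_node_stub node;   /* embedded, as in struct swsusp_extent */
	unsigned long start;
	unsigned long end;
};

int main(void)
{
	struct swsusp_extent_stub ext = { .start = 100, .end = 200 };
	struct rb_node_stub *n = &ext.node;   /* what an rbtree walk hands back */

	struct swsusp_extent_stub *back =
		container_of(n, struct swsusp_extent_stub, node);
	printf("recovered extent [%lu, %lu]\n", back->start, back->end);
	return 0;
}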
diff --git a/kernel/printk.c b/kernel/printk.c
index 2d607f4d1797..22e070f3470a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -87,6 +87,12 @@ static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map console_lock_dep_map = {
+ .name = "console_lock"
+};
+#endif
+
/*
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
@@ -1908,12 +1914,14 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
*/
void console_lock(void)
{
- BUG_ON(in_interrupt());
+ might_sleep();
+
down(&console_sem);
if (console_suspended)
return;
console_locked = 1;
console_may_schedule = 1;
+ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
}
EXPORT_SYMBOL(console_lock);
@@ -1935,6 +1943,7 @@ int console_trylock(void)
}
console_locked = 1;
console_may_schedule = 0;
+ mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
return 1;
}
EXPORT_SYMBOL(console_trylock);
@@ -2095,6 +2104,7 @@ skip:
local_irq_restore(flags);
}
console_locked = 0;
+ mutex_release(&console_lock_dep_map, 1, _RET_IP_);
/* Release the exclusive_console once it is used */
if (unlikely(exclusive_console))
diff --git a/kernel/rcu.h b/kernel/rcu.h
index 8ba99cdc6515..20dfba576c2b 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -109,4 +109,6 @@ static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
}
}
+extern int rcu_expedited;
+
#endif /* __LINUX_RCU_H */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 29ca1c6da594..a2cf76177b44 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -46,12 +46,15 @@
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
+#include <linux/module.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>
#include "rcu.h"
+module_param(rcu_expedited, int, 0);
+
#ifdef CONFIG_PREEMPT_RCU
/*
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index e4c6a598d6f7..e7dce58f9c2a 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -195,7 +195,7 @@ EXPORT_SYMBOL(rcu_is_cpu_idle);
*/
int rcu_is_cpu_rrupt_from_idle(void)
{
- return rcu_dynticks_nesting <= 0;
+ return rcu_dynticks_nesting <= 1;
}
/*
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 3d0190282204..f85016a2309b 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -706,7 +706,10 @@ void synchronize_rcu(void)
return;
/* Once we get past the fastpath checks, same code as rcu_barrier(). */
- rcu_barrier();
+ if (rcu_expedited)
+ synchronize_rcu_expedited();
+ else
+ rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index aaa7b9f3532a..31dea01c85fd 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -339,7 +339,6 @@ rcu_stutter_wait(char *title)
struct rcu_torture_ops {
void (*init)(void);
- void (*cleanup)(void);
int (*readlock)(void);
void (*read_delay)(struct rcu_random_state *rrsp);
void (*readunlock)(int idx);
@@ -431,7 +430,6 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
static struct rcu_torture_ops rcu_ops = {
.init = NULL,
- .cleanup = NULL,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
@@ -475,7 +473,6 @@ static void rcu_sync_torture_init(void)
static struct rcu_torture_ops rcu_sync_ops = {
.init = rcu_sync_torture_init,
- .cleanup = NULL,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
@@ -493,7 +490,6 @@ static struct rcu_torture_ops rcu_sync_ops = {
static struct rcu_torture_ops rcu_expedited_ops = {
.init = rcu_sync_torture_init,
- .cleanup = NULL,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_torture_read_unlock,
@@ -536,7 +532,6 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
static struct rcu_torture_ops rcu_bh_ops = {
.init = NULL,
- .cleanup = NULL,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
@@ -553,7 +548,6 @@ static struct rcu_torture_ops rcu_bh_ops = {
static struct rcu_torture_ops rcu_bh_sync_ops = {
.init = rcu_sync_torture_init,
- .cleanup = NULL,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
@@ -570,7 +564,6 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
static struct rcu_torture_ops rcu_bh_expedited_ops = {
.init = rcu_sync_torture_init,
- .cleanup = NULL,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
@@ -589,19 +582,7 @@ static struct rcu_torture_ops rcu_bh_expedited_ops = {
* Definitions for srcu torture testing.
*/
-static struct srcu_struct srcu_ctl;
-
-static void srcu_torture_init(void)
-{
- init_srcu_struct(&srcu_ctl);
- rcu_sync_torture_init();
-}
-
-static void srcu_torture_cleanup(void)
-{
- synchronize_srcu(&srcu_ctl);
- cleanup_srcu_struct(&srcu_ctl);
-}
+DEFINE_STATIC_SRCU(srcu_ctl);
static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
@@ -672,8 +653,7 @@ static int srcu_torture_stats(char *page)
}
static struct rcu_torture_ops srcu_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
+ .init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
@@ -687,8 +667,7 @@ static struct rcu_torture_ops srcu_ops = {
};
static struct rcu_torture_ops srcu_sync_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
+ .init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
@@ -712,8 +691,7 @@ static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
}
static struct rcu_torture_ops srcu_raw_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
+ .init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock_raw,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock_raw,
@@ -727,8 +705,7 @@ static struct rcu_torture_ops srcu_raw_ops = {
};
static struct rcu_torture_ops srcu_raw_sync_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
+ .init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock_raw,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock_raw,
@@ -747,8 +724,7 @@ static void srcu_torture_synchronize_expedited(void)
}
static struct rcu_torture_ops srcu_expedited_ops = {
- .init = srcu_torture_init,
- .cleanup = srcu_torture_cleanup,
+ .init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock,
.read_delay = srcu_read_delay,
.readunlock = srcu_torture_read_unlock,
@@ -783,7 +759,6 @@ static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
static struct rcu_torture_ops sched_ops = {
.init = rcu_sync_torture_init,
- .cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
@@ -799,7 +774,6 @@ static struct rcu_torture_ops sched_ops = {
static struct rcu_torture_ops sched_sync_ops = {
.init = rcu_sync_torture_init,
- .cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
@@ -814,7 +788,6 @@ static struct rcu_torture_ops sched_sync_ops = {
static struct rcu_torture_ops sched_expedited_ops = {
.init = rcu_sync_torture_init,
- .cleanup = NULL,
.readlock = sched_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = sched_torture_read_unlock,
@@ -1396,12 +1369,16 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
"test_boost=%d/%d test_boost_interval=%d "
"test_boost_duration=%d shutdown_secs=%d "
+ "stall_cpu=%d stall_cpu_holdoff=%d "
+ "n_barrier_cbs=%d "
"onoff_interval=%d onoff_holdoff=%d\n",
torture_type, tag, nrealreaders, nfakewriters,
stat_interval, verbose, test_no_idle_hz, shuffle_interval,
stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
test_boost, cur_ops->can_boost,
test_boost_interval, test_boost_duration, shutdown_secs,
+ stall_cpu, stall_cpu_holdoff,
+ n_barrier_cbs,
onoff_interval, onoff_holdoff);
}
@@ -1502,6 +1479,7 @@ rcu_torture_onoff(void *arg)
unsigned long delta;
int maxcpu = -1;
DEFINE_RCU_RANDOM(rand);
+ int ret;
unsigned long starttime;
VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
@@ -1522,7 +1500,13 @@ rcu_torture_onoff(void *arg)
torture_type, cpu);
starttime = jiffies;
n_offline_attempts++;
- if (cpu_down(cpu) == 0) {
+ ret = cpu_down(cpu);
+ if (ret) {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "rcu_torture_onoff task: offline %d failed: errno %d\n",
+ torture_type, cpu, ret);
+ } else {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: offlined %d\n",
@@ -1936,8 +1920,6 @@ rcu_torture_cleanup(void)
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
- if (cur_ops->cleanup)
- cur_ops->cleanup();
if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
else if (n_online_successes != n_online_attempts ||
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 74df86bd9204..e441b77b614e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -68,9 +68,9 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
.level = { &sname##_state.node[0] }, \
.call = cr, \
.fqs_state = RCU_GP_IDLE, \
- .gpnum = -300, \
- .completed = -300, \
- .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
+ .gpnum = 0UL - 300UL, \
+ .completed = 0UL - 300UL, \
+ .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
.orphan_nxttail = &sname##_state.orphan_nxtlist, \
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
@@ -207,18 +207,15 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
.dynticks = ATOMIC_INIT(1),
-#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE)
- .ignore_user_qs = true,
-#endif
};
-static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
-static int qhimark = 10000; /* If this many pending, ignore blimit. */
-static int qlowmark = 100; /* Once only this many pending, use blimit. */
+static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
+static long qhimark = 10000; /* If this many pending, ignore blimit. */
+static long qlowmark = 100; /* Once only this many pending, use blimit. */
-module_param(blimit, int, 0444);
-module_param(qhimark, int, 0444);
-module_param(qlowmark, int, 0444);
+module_param(blimit, long, 0444);
+module_param(qhimark, long, 0444);
+module_param(qlowmark, long, 0444);
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
@@ -303,7 +300,8 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
- return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
+ return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
+ rdp->nxttail[RCU_DONE_TAIL] != NULL;
}
/*
@@ -312,8 +310,11 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
- return *rdp->nxttail[RCU_DONE_TAIL +
- ACCESS_ONCE(rsp->completed) != rdp->completed] &&
+ struct rcu_head **ntp;
+
+ ntp = rdp->nxttail[RCU_DONE_TAIL +
+ (ACCESS_ONCE(rsp->completed) != rdp->completed)];
+ return rdp->nxttail[RCU_DONE_TAIL] && ntp && *ntp &&
!rcu_gp_in_progress(rsp);
}
@@ -416,29 +417,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
*/
void rcu_user_enter(void)
{
- unsigned long flags;
- struct rcu_dynticks *rdtp;
-
- /*
- * Some contexts may involve an exception occuring in an irq,
- * leading to that nesting:
- * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
- * This would mess up the dyntick_nesting count though. And rcu_irq_*()
- * helpers are enough to protect RCU uses inside the exception. So
- * just return immediately if we detect we are in an IRQ.
- */
- if (in_interrupt())
- return;
-
- WARN_ON_ONCE(!current->mm);
-
- local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
- if (!rdtp->ignore_user_qs && !rdtp->in_user) {
- rdtp->in_user = true;
- rcu_eqs_enter(true);
- }
- local_irq_restore(flags);
+ rcu_eqs_enter(1);
}
/**
@@ -575,27 +554,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
*/
void rcu_user_exit(void)
{
- unsigned long flags;
- struct rcu_dynticks *rdtp;
-
- /*
- * Some contexts may involve an exception occuring in an irq,
- * leading to that nesting:
- * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
- * This would mess up the dyntick_nesting count though. And rcu_irq_*()
- * helpers are enough to protect RCU uses inside the exception. So
- * just return immediately if we detect we are in an IRQ.
- */
- if (in_interrupt())
- return;
-
- local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
- if (rdtp->in_user) {
- rdtp->in_user = false;
- rcu_eqs_exit(true);
- }
- local_irq_restore(flags);
+ rcu_eqs_exit(1);
}
/**
@@ -718,21 +677,6 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
-#ifdef CONFIG_RCU_USER_QS
-void rcu_user_hooks_switch(struct task_struct *prev,
- struct task_struct *next)
-{
- struct rcu_dynticks *rdtp;
-
- /* Interrupts are disabled in context switch */
- rdtp = &__get_cpu_var(rcu_dynticks);
- if (!rdtp->ignore_user_qs) {
- clear_tsk_thread_flag(prev, TIF_NOHZ);
- set_tsk_thread_flag(next, TIF_NOHZ);
- }
-}
-#endif /* #ifdef CONFIG_RCU_USER_QS */
-
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
@@ -873,6 +817,29 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
}
+/*
+ * Dump stacks of all tasks running on stalled CPUs. This is a fallback
+ * for architectures that do not implement trigger_all_cpu_backtrace().
+ * The NMI-triggered stack traces are more accurate because they are
+ * printed by the target CPU.
+ */
+static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+{
+ int cpu;
+ unsigned long flags;
+ struct rcu_node *rnp;
+
+ rcu_for_each_leaf_node(rsp, rnp) {
+ raw_spin_lock_irqsave(&rnp->lock, flags);
+ if (rnp->qsmask != 0) {
+ for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+ if (rnp->qsmask & (1UL << cpu))
+ dump_cpu_task(rnp->grplo + cpu);
+ }
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ }
+}
+
static void print_other_cpu_stall(struct rcu_state *rsp)
{
int cpu;
@@ -880,6 +847,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
unsigned long flags;
int ndetected = 0;
struct rcu_node *rnp = rcu_get_root(rsp);
+ long totqlen = 0;
/* Only let one CPU complain about others per time interval. */
@@ -924,12 +892,15 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
print_cpu_stall_info_end();
- printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
- smp_processor_id(), (long)(jiffies - rsp->gp_start));
+ for_each_possible_cpu(cpu)
+ totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
+ pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
+ smp_processor_id(), (long)(jiffies - rsp->gp_start),
+ rsp->gpnum, rsp->completed, totqlen);
if (ndetected == 0)
printk(KERN_ERR "INFO: Stall ended before state dump start\n");
else if (!trigger_all_cpu_backtrace())
- dump_stack();
+ rcu_dump_cpu_stacks(rsp);
/* Complain about tasks blocking the grace period. */
@@ -940,8 +911,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
static void print_cpu_stall(struct rcu_state *rsp)
{
+ int cpu;
unsigned long flags;
struct rcu_node *rnp = rcu_get_root(rsp);
+ long totqlen = 0;
/*
* OK, time to rat on ourselves...
@@ -952,7 +925,10 @@ static void print_cpu_stall(struct rcu_state *rsp)
print_cpu_stall_info_begin();
print_cpu_stall_info(rsp, smp_processor_id());
print_cpu_stall_info_end();
- printk(KERN_CONT " (t=%lu jiffies)\n", jiffies - rsp->gp_start);
+ for_each_possible_cpu(cpu)
+ totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
+ pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
+ jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
if (!trigger_all_cpu_backtrace())
dump_stack();
@@ -1091,6 +1067,7 @@ static void init_callback_list(struct rcu_data *rdp)
rdp->nxtlist = NULL;
for (i = 0; i < RCU_NEXT_SIZE; i++)
rdp->nxttail[i] = &rdp->nxtlist;
+ init_nocb_callback_list(rdp);
}
/*
@@ -1404,15 +1381,37 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
!cpu_needs_another_gp(rsp, rdp)) {
/*
* Either we have not yet spawned the grace-period
- * task or this CPU does not need another grace period.
+ * task, this CPU does not need another grace period,
+ * or a grace period is already in progress.
* Either way, don't start a new grace period.
*/
raw_spin_unlock_irqrestore(&rnp->lock, flags);
return;
}
+ /*
+ * Because there is no grace period in progress right now,
+ * any callbacks we have up to this point will be satisfied
+ * by the next grace period. So promote all callbacks to be
+ * handled after the end of the next grace period. If the
+ * CPU is not yet aware of the end of the previous grace period,
+ * we need to allow for the callback advancement that will
+ * occur when it does become aware. Deadlock prevents us from
+ * making it aware at this point: We cannot acquire a leaf
+ * rcu_node ->lock while holding the root rcu_node ->lock.
+ */
+ rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+ if (rdp->completed == rsp->completed)
+ rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
rsp->gp_flags = RCU_GP_FLAG_INIT;
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+
+ /* Ensure that CPU is aware of completion of last grace period. */
+ rcu_process_gp_end(rsp, rdp);
+ local_irq_restore(flags);
+
+ /* Wake up rcu_gp_kthread() to start the grace period. */
wake_up(&rsp->gp_wq);
}
@@ -1573,16 +1572,20 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
/*
* Send the specified CPU's RCU callbacks to the orphanage. The
* specified CPU must be offline, and the caller must hold the
- * ->onofflock.
+ * ->orphan_lock.
*/
static void
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
struct rcu_node *rnp, struct rcu_data *rdp)
{
+ /* No-CBs CPUs do not have orphanable callbacks. */
+ if (is_nocb_cpu(rdp->cpu))
+ return;
+
/*
* Orphan the callbacks. First adjust the counts. This is safe
- * because ->onofflock excludes _rcu_barrier()'s adoption of
- * the callbacks, thus no memory barrier is required.
+ * because _rcu_barrier() excludes CPU-hotplug operations, so it
+ * cannot be running now. Thus no memory barrier is required.
*/
if (rdp->nxtlist != NULL) {
rsp->qlen_lazy += rdp->qlen_lazy;
@@ -1623,13 +1626,17 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
/*
* Adopt the RCU callbacks from the specified rcu_state structure's
- * orphanage. The caller must hold the ->onofflock.
+ * orphanage. The caller must hold the ->orphan_lock.
*/
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
int i;
struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
+ /* No-CBs CPUs are handled specially. */
+ if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
+ return;
+
/* Do the accounting first. */
rdp->qlen_lazy += rsp->qlen_lazy;
rdp->qlen += rsp->qlen;
@@ -1702,7 +1709,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
/* Exclude any attempts to start a new grace period. */
mutex_lock(&rsp->onoff_mutex);
- raw_spin_lock_irqsave(&rsp->onofflock, flags);
+ raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
@@ -1729,10 +1736,10 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
/*
* We still hold the leaf rcu_node structure lock here, and
* irqs are still disabled. The reason for this subterfuge is
- * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
+ * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
* held leads to deadlock.
*/
- raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+ raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
rnp = rdp->mynode;
if (need_report & RCU_OFL_TASKS_NORM_GP)
rcu_report_unblock_qs_rnp(rnp, flags);
@@ -1769,7 +1776,8 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_head *next, *list, **tail;
- int bl, count, count_lazy, i;
+ long bl, count, count_lazy;
+ int i;
/* If no callbacks are ready, just return.*/
if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -2107,9 +2115,15 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
}
}
+/*
+ * Helper function for call_rcu() and friends. The cpu argument will
+ * normally be -1, indicating "currently running CPU". It may specify
+ * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
+ * is expected to specify a CPU.
+ */
static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
- struct rcu_state *rsp, bool lazy)
+ struct rcu_state *rsp, int cpu, bool lazy)
{
unsigned long flags;
struct rcu_data *rdp;
@@ -2129,9 +2143,14 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
rdp = this_cpu_ptr(rsp->rda);
/* Add the callback to our list. */
- if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL)) {
+ if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
+ int offline;
+
+ if (cpu != -1)
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ offline = !__call_rcu_nocb(rdp, head, lazy);
+ WARN_ON_ONCE(offline);
/* _call_rcu() is illegal on offline CPU; leak the callback. */
- WARN_ON_ONCE(1);
local_irq_restore(flags);
return;
}
@@ -2160,7 +2179,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
*/
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
- __call_rcu(head, func, &rcu_sched_state, 0);
+ __call_rcu(head, func, &rcu_sched_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -2169,7 +2188,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
*/
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
- __call_rcu(head, func, &rcu_bh_state, 0);
+ __call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -2205,10 +2224,28 @@ static inline int rcu_blocking_is_gp(void)
* rcu_read_lock_sched().
*
* This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns. However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
+ * non-threaded hardware-interrupt handlers, in progress on entry will
+ * have completed before this primitive returns. However, this does not
+ * guarantee that softirq handlers will have completed, since in some
+ * kernels, these handlers can run in process context, and can block.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_sched() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-sched read-side critical section whose beginning
+ * preceded the call to synchronize_sched(). In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_sched() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_sched() and before the beginning of
+ * that RCU read-side critical section. Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_sched(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
*
* This primitive provides the guarantees made by the (now removed)
* synchronize_kernel() API. In contrast, synchronize_rcu() only
@@ -2224,7 +2261,10 @@ void synchronize_sched(void)
"Illegal synchronize_sched() in RCU-sched read-side critical section");
if (rcu_blocking_is_gp())
return;
- wait_rcu_gp(call_rcu_sched);
+ if (rcu_expedited)
+ synchronize_sched_expedited();
+ else
+ wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -2236,6 +2276,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
* read-side critical sections have completed. RCU read-side critical
* sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
* and may be nested.
+ *
+ * See the description of synchronize_sched() for more detailed information
+ * on memory ordering guarantees.
*/
void synchronize_rcu_bh(void)
{
@@ -2245,13 +2288,13 @@ void synchronize_rcu_bh(void)
"Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
if (rcu_blocking_is_gp())
return;
- wait_rcu_gp(call_rcu_bh);
+ if (rcu_expedited)
+ synchronize_rcu_bh_expedited();
+ else
+ wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
-
static int synchronize_sched_expedited_cpu_stop(void *data)
{
/*
@@ -2308,10 +2351,32 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
*/
void synchronize_sched_expedited(void)
{
- int firstsnap, s, snap, trycount = 0;
+ long firstsnap, s, snap;
+ int trycount = 0;
+ struct rcu_state *rsp = &rcu_sched_state;
+
+ /*
+ * If we are in danger of counter wrap, just do synchronize_sched().
+ * By allowing sync_sched_expedited_started to advance no more than
+ * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
+ * that more than 3.5 billion CPUs would be required to force a
+ * counter wrap on a 32-bit system. Quite a few more CPUs would of
+ * course be required on a 64-bit system.
+ */
+ if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+ (ulong)atomic_long_read(&rsp->expedited_done) +
+ ULONG_MAX / 8)) {
+ synchronize_sched();
+ atomic_long_inc(&rsp->expedited_wrap);
+ return;
+ }
- /* Note that atomic_inc_return() implies full memory barrier. */
- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+ /*
+ * Take a ticket. Note that atomic_inc_return() implies a
+ * full memory barrier.
+ */
+ snap = atomic_long_inc_return(&rsp->expedited_start);
+ firstsnap = snap;
get_online_cpus();
WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
@@ -2323,48 +2388,65 @@ void synchronize_sched_expedited(void)
synchronize_sched_expedited_cpu_stop,
NULL) == -EAGAIN) {
put_online_cpus();
+ atomic_long_inc(&rsp->expedited_tryfail);
+
+ /* Check to see if someone else did our work for us. */
+ s = atomic_long_read(&rsp->expedited_done);
+ if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
+ /* ensure test happens before caller kfree */
+ smp_mb__before_atomic_inc(); /* ^^^ */
+ atomic_long_inc(&rsp->expedited_workdone1);
+ return;
+ }
/* No joy, try again later. Or just synchronize_sched(). */
if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
} else {
- synchronize_sched();
+ wait_rcu_gp(call_rcu_sched);
+ atomic_long_inc(&rsp->expedited_normal);
return;
}
- /* Check to see if someone else did our work for us. */
- s = atomic_read(&sync_sched_expedited_done);
- if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
- smp_mb(); /* ensure test happens before caller kfree */
+ /* Recheck to see if someone else did our work for us. */
+ s = atomic_long_read(&rsp->expedited_done);
+ if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
+ /* ensure test happens before caller kfree */
+ smp_mb__before_atomic_inc(); /* ^^^ */
+ atomic_long_inc(&rsp->expedited_workdone2);
return;
}
/*
* Refetching sync_sched_expedited_started allows later
- * callers to piggyback on our grace period. We subtract
- * 1 to get the same token that the last incrementer got.
- * We retry after they started, so our grace period works
- * for them, and they started after our first try, so their
- * grace period works for us.
+ * callers to piggyback on our grace period. We retry
+ * after they started, so our grace period works for them,
+ * and they started after our first try, so their grace
+ * period works for us.
*/
get_online_cpus();
- snap = atomic_read(&sync_sched_expedited_started);
+ snap = atomic_long_read(&rsp->expedited_start);
smp_mb(); /* ensure read is before try_stop_cpus(). */
}
+ atomic_long_inc(&rsp->expedited_stoppedcpus);
/*
* Everyone up to our most recent fetch is covered by our grace
* period. Update the counter, but only if our work is still
* relevant -- which it won't be if someone who started later
- * than we did beat us to the punch.
+ * than we did already did their update.
*/
do {
- s = atomic_read(&sync_sched_expedited_done);
- if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
- smp_mb(); /* ensure test happens before caller kfree */
+ atomic_long_inc(&rsp->expedited_done_tries);
+ s = atomic_long_read(&rsp->expedited_done);
+ if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
+ /* ensure test happens before caller kfree */
+ smp_mb__before_atomic_inc(); /* ^^^ */
+ atomic_long_inc(&rsp->expedited_done_lost);
break;
}
- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+ } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
+ atomic_long_inc(&rsp->expedited_done_exit);
put_online_cpus();
}
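
The ticket scheme above depends on wrap-safe comparisons of free-running counters. As a rough illustration (a standalone userspace sketch, not part of the patch), a ULONG_CMP_GE()-style test reduces to an unsigned subtraction that tolerates wraparound as long as the two counters stay within ULONG_MAX/2 of each other:

#include <assert.h>
#include <limits.h>

/* Wrap-safe "a >= b" for free-running unsigned counters. */
static int ulong_cmp_ge(unsigned long a, unsigned long b)
{
        return ULONG_MAX / 2 >= a - b;  /* unsigned subtraction wraps */
}

int main(void)
{
        unsigned long done = ULONG_MAX - 1;     /* counter about to wrap */
        unsigned long snap = done + 3;          /* wraps past zero */

        assert(ulong_cmp_ge(snap, done));       /* snap is still "newer" */
        assert(!ulong_cmp_ge(done, snap));
        return 0;
}

The ULONG_MAX/8 guard in synchronize_sched_expedited() keeps ->expedited_start from outrunning ->expedited_done far enough for this comparison to become ambiguous.
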
@@ -2558,9 +2640,17 @@ static void _rcu_barrier(struct rcu_state *rsp)
* When that callback is invoked, we will know that all of the
* corresponding CPU's preceding callbacks have been invoked.
*/
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
+ if (!cpu_online(cpu) && !is_nocb_cpu(cpu))
+ continue;
rdp = per_cpu_ptr(rsp->rda, cpu);
- if (ACCESS_ONCE(rdp->qlen)) {
+ if (is_nocb_cpu(cpu)) {
+ _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+ rsp->n_barrier_done);
+ atomic_inc(&rsp->barrier_cpu_count);
+ __call_rcu(&rdp->barrier_head, rcu_barrier_callback,
+ rsp, cpu, 0);
+ } else if (ACCESS_ONCE(rdp->qlen)) {
_rcu_barrier_trace(rsp, "OnlineQ", cpu,
rsp->n_barrier_done);
smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
@@ -2634,6 +2724,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
#endif
rdp->cpu = cpu;
rdp->rsp = rsp;
+ rcu_boot_init_nocb_percpu_data(rdp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
@@ -2715,6 +2806,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
struct rcu_state *rsp;
+ int ret = NOTIFY_OK;
trace_rcu_utilization("Start CPU hotplug");
switch (action) {
@@ -2728,7 +2820,10 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
rcu_boost_kthread_setaffinity(rnp, -1);
break;
case CPU_DOWN_PREPARE:
- rcu_boost_kthread_setaffinity(rnp, cpu);
+ if (nocb_cpu_expendable(cpu))
+ rcu_boost_kthread_setaffinity(rnp, cpu);
+ else
+ ret = NOTIFY_BAD;
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
@@ -2752,7 +2847,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
break;
}
trace_rcu_utilization("End CPU hotplug");
- return NOTIFY_OK;
+ return ret;
}
/*
@@ -2772,6 +2867,7 @@ static int __init rcu_spawn_gp_kthread(void)
raw_spin_lock_irqsave(&rnp->lock, flags);
rsp->gp_kthread = t;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ rcu_spawn_nocb_kthreads(rsp);
}
return 0;
}
@@ -2967,6 +3063,7 @@ void __init rcu_init(void)
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
+ rcu_init_nocb();
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
/*
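
The rcu_cpu_notify() change above turns CPU_DOWN_PREPARE into a veto point: returning NOTIFY_BAD makes the hotplug core abort the offline. Below is a minimal sketch of that pattern for the CPU-notifier API of this era; names such as my_cpu_is_expendable() are hypothetical stand-ins for nocb_cpu_expendable(), not anything defined by the patch.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/types.h>

/* Hypothetical policy hook; stands in for nocb_cpu_expendable(). */
static bool my_cpu_is_expendable(int cpu)
{
        return cpu != 0;                /* e.g. never let CPU 0 go offline */
}

static int my_cpu_notify(struct notifier_block *self,
                         unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                if (!my_cpu_is_expendable(cpu))
                        return NOTIFY_BAD;      /* veto the offline */
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
        .notifier_call = my_cpu_notify,
};

/* During init: register_cpu_notifier(&my_cpu_nb); */
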
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index a240f032848e..4b69291b093d 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -287,6 +287,7 @@ struct rcu_data {
long qlen_last_fqs_check;
/* qlen at last check for QS forcing */
unsigned long n_cbs_invoked; /* count of RCU cbs invoked. */
+ unsigned long n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
unsigned long n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
unsigned long n_cbs_adopted; /* RCU cbs adopted from dying CPU */
unsigned long n_force_qs_snap;
@@ -317,6 +318,18 @@ struct rcu_data {
struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+ /* 7) Callback offloading. */
+#ifdef CONFIG_RCU_NOCB_CPU
+ struct rcu_head *nocb_head; /* CBs waiting for kthread. */
+ struct rcu_head **nocb_tail;
+ atomic_long_t nocb_q_count; /* # CBs waiting for kthread */
+ atomic_long_t nocb_q_count_lazy; /* (approximate). */
+ int nocb_p_count; /* # CBs being invoked by kthread */
+ int nocb_p_count_lazy; /* (approximate). */
+ wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */
+ struct task_struct *nocb_kthread;
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+
int cpu;
struct rcu_state *rsp;
};
@@ -369,6 +382,12 @@ struct rcu_state {
struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
void (*func)(struct rcu_head *head));
+#ifdef CONFIG_RCU_NOCB_CPU
+ void (*call_remote)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+ /* call_rcu() flavor, but for */
+ /* placing on remote CPU. */
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
/* The following fields are guarded by the root rcu_node's lock. */
@@ -383,9 +402,8 @@ struct rcu_state {
/* End of fields guarded by root rcu_node's lock. */
- raw_spinlock_t onofflock ____cacheline_internodealigned_in_smp;
- /* exclude on/offline and */
- /* starting new GP. */
+ raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
+ /* Protect following fields. */
struct rcu_head *orphan_nxtlist; /* Orphaned callbacks that */
/* need a grace period. */
struct rcu_head **orphan_nxttail; /* Tail of above. */
@@ -394,7 +412,7 @@ struct rcu_state {
struct rcu_head **orphan_donetail; /* Tail of above. */
long qlen_lazy; /* Number of lazy callbacks. */
long qlen; /* Total number of callbacks. */
- /* End of fields guarded by onofflock. */
+ /* End of fields guarded by orphan_lock. */
struct mutex onoff_mutex; /* Coordinate hotplug & GPs. */
@@ -405,6 +423,18 @@ struct rcu_state {
/* _rcu_barrier(). */
/* End of fields guarded by barrier_mutex. */
+ atomic_long_t expedited_start; /* Starting ticket. */
+ atomic_long_t expedited_done; /* Done ticket. */
+ atomic_long_t expedited_wrap; /* # near-wrap incidents. */
+ atomic_long_t expedited_tryfail; /* # acquisition failures. */
+ atomic_long_t expedited_workdone1; /* # done by others #1. */
+ atomic_long_t expedited_workdone2; /* # done by others #2. */
+ atomic_long_t expedited_normal; /* # fallbacks to normal. */
+ atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
+ atomic_long_t expedited_done_tries; /* # tries to update _done. */
+ atomic_long_t expedited_done_lost; /* # times beaten to _done. */
+ atomic_long_t expedited_done_exit; /* # times exited _done loop. */
+
unsigned long jiffies_force_qs; /* Time at which to invoke */
/* force_quiescent_state(). */
unsigned long n_force_qs; /* Number of calls to */
@@ -428,6 +458,8 @@ struct rcu_state {
#define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. */
extern struct list_head rcu_struct_flavors;
+
+/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
@@ -504,5 +536,32 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
+static bool is_nocb_cpu(int cpu);
+static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
+ bool lazy);
+static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
+ struct rcu_data *rdp);
+static bool nocb_cpu_expendable(int cpu);
+static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
+static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
+static void init_nocb_callback_list(struct rcu_data *rdp);
+static void __init rcu_init_nocb(void);
#endif /* #ifndef RCU_TREE_NONCORE */
+
+#ifdef CONFIG_RCU_TRACE
+#ifdef CONFIG_RCU_NOCB_CPU
+/* Sum up queue lengths for tracing. */
+static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
+{
+ *ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
+ *qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
+}
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
+{
+ *ql = 0;
+ *qll = 0;
+}
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+#endif /* #ifdef CONFIG_RCU_TRACE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f92115488187..f6e5ec2932b4 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,7 @@
*/
#include <linux/delay.h>
+#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
@@ -36,6 +37,14 @@
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif
+#ifdef CONFIG_RCU_NOCB_CPU
+static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
+static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
+static bool rcu_nocb_poll; /* Offload kthreads are to poll. */
+module_param(rcu_nocb_poll, bool, 0444);
+static char __initdata nocb_buf[NR_CPUS * 5];
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+
/*
* Check the RCU kernel configuration parameters and print informative
* messages about anything out of the ordinary. If you like #ifdef, you
@@ -76,6 +85,18 @@ static void __init rcu_bootup_announce_oddness(void)
printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
if (nr_cpu_ids != NR_CPUS)
printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
+#ifdef CONFIG_RCU_NOCB_CPU
+ if (have_rcu_nocb_mask) {
+ if (cpumask_test_cpu(0, rcu_nocb_mask)) {
+ cpumask_clear_cpu(0, rcu_nocb_mask);
+ pr_info("\tCPU 0: illegal no-CBs CPU (cleared).\n");
+ }
+ cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
+ pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
+ if (rcu_nocb_poll)
+ pr_info("\tExperimental polled no-CBs CPUs.\n");
+ }
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}
#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -642,7 +663,7 @@ static void rcu_preempt_do_callbacks(void)
*/
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
- __call_rcu(head, func, &rcu_preempt_state, 0);
+ __call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
@@ -656,7 +677,7 @@ EXPORT_SYMBOL_GPL(call_rcu);
void kfree_call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu))
{
- __call_rcu(head, func, &rcu_preempt_state, 1);
+ __call_rcu(head, func, &rcu_preempt_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
@@ -670,6 +691,9 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
* concurrently with new RCU read-side critical sections that began while
* synchronize_rcu() was waiting. RCU read-side critical sections are
* delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
+ *
+ * See the description of synchronize_sched() for more detailed information
+ * on memory ordering guarantees.
*/
void synchronize_rcu(void)
{
@@ -679,7 +703,10 @@ void synchronize_rcu(void)
"Illegal synchronize_rcu() in RCU read-side critical section");
if (!rcu_scheduler_active)
return;
- wait_rcu_gp(call_rcu);
+ if (rcu_expedited)
+ synchronize_rcu_expedited();
+ else
+ wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
@@ -757,7 +784,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
* grace period for the specified rcu_node structure. If there are no such
* tasks, report it up the rcu_node hierarchy.
*
- * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
+ * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
+ * CPU hotplug operations.
*/
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
@@ -831,7 +859,7 @@ void synchronize_rcu_expedited(void)
udelay(trycount * num_online_cpus());
} else {
put_online_cpus();
- synchronize_rcu();
+ wait_rcu_gp(call_rcu);
return;
}
}
@@ -875,6 +903,11 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
/**
* rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ *
+ * Note that this primitive does not necessarily wait for an RCU grace period
+ * to complete. For example, if there are no RCU callbacks queued anywhere
+ * in the system, then rcu_barrier() is within its rights to return
+ * immediately, without waiting for anything, much less an RCU grace period.
*/
void rcu_barrier(void)
{
@@ -1013,7 +1046,7 @@ static void rcu_preempt_check_callbacks(int cpu)
void kfree_call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu))
{
- __call_rcu(head, func, &rcu_sched_state, 1);
+ __call_rcu(head, func, &rcu_sched_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
@@ -2092,3 +2125,373 @@ static void increment_cpu_stall_ticks(void)
}
#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+
+/*
+ * Offload callback processing from the boot-time-specified set of CPUs
+ * specified by rcu_nocb_mask. For each CPU in the set, there is a
+ * kthread created that pulls the callbacks from the corresponding CPU,
+ * waits for a grace period to elapse, and invokes the callbacks.
+ * The no-CBs CPUs do a wake_up() on their kthread when they insert
+ * a callback into any empty list, unless the rcu_nocb_poll boot parameter
+ * has been specified, in which case each kthread actively polls its
+ * CPU. (Which isn't so great for energy efficiency, but which does
+ * reduce RCU's overhead on that CPU.)
+ *
+ * This is intended to be used in conjunction with Frederic Weisbecker's
+ * adaptive-idle work, which would seriously reduce OS jitter on CPUs
+ * running CPU-bound user-mode computations.
+ *
+ * Offloading of callback processing could also in theory be used as
+ * an energy-efficiency measure because CPUs with no RCU callbacks
+ * queued are more aggressive about entering dyntick-idle mode.
+ */
+
+
+/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
+static int __init rcu_nocb_setup(char *str)
+{
+ alloc_bootmem_cpumask_var(&rcu_nocb_mask);
+ have_rcu_nocb_mask = true;
+ cpulist_parse(str, rcu_nocb_mask);
+ return 1;
+}
+__setup("rcu_nocbs=", rcu_nocb_setup);
+
+/* Is the specified CPU a no-CBs CPU? */
+static bool is_nocb_cpu(int cpu)
+{
+ if (have_rcu_nocb_mask)
+ return cpumask_test_cpu(cpu, rcu_nocb_mask);
+ return false;
+}
+
+/*
+ * Enqueue the specified string of rcu_head structures onto the specified
+ * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
+ * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
+ * counts are supplied by rhcount and rhcount_lazy.
+ *
+ * If warranted, also wake up the kthread servicing this CPU's queues.
+ */
+static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
+ struct rcu_head *rhp,
+ struct rcu_head **rhtp,
+ int rhcount, int rhcount_lazy)
+{
+ int len;
+ struct rcu_head **old_rhpp;
+ struct task_struct *t;
+
+ /* Enqueue the callback on the nocb list and update counts. */
+ old_rhpp = xchg(&rdp->nocb_tail, rhtp);
+ ACCESS_ONCE(*old_rhpp) = rhp;
+ atomic_long_add(rhcount, &rdp->nocb_q_count);
+ atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
+
+ /* If we are not being polled and there is a kthread, awaken it ... */
+ t = ACCESS_ONCE(rdp->nocb_kthread);
+ if (rcu_nocb_poll | !t)
+ return;
+ len = atomic_long_read(&rdp->nocb_q_count);
+ if (old_rhpp == &rdp->nocb_head) {
+ wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
+ rdp->qlen_last_fqs_check = 0;
+ } else if (len > rdp->qlen_last_fqs_check + qhimark) {
+ wake_up_process(t); /* ... or if many callbacks queued. */
+ rdp->qlen_last_fqs_check = LONG_MAX / 2;
+ }
+ return;
+}
+
+/*
+ * This is a helper for __call_rcu(), which invokes this when the normal
+ * callback queue is inoperable. If this is not a no-CBs CPU, this
+ * function returns failure back to __call_rcu(), which can complain
+ * appropriately.
+ *
+ * Otherwise, this function queues the callback where the corresponding
+ * "rcuo" kthread can find it.
+ */
+static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
+ bool lazy)
+{
+
+ if (!is_nocb_cpu(rdp->cpu))
+ return 0;
+ __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
+ return 1;
+}
+
+/*
+ * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
+ * not a no-CBs CPU.
+ */
+static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
+ struct rcu_data *rdp)
+{
+ long ql = rsp->qlen;
+ long qll = rsp->qlen_lazy;
+
+ /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
+ if (!is_nocb_cpu(smp_processor_id()))
+ return 0;
+ rsp->qlen = 0;
+ rsp->qlen_lazy = 0;
+
+ /* First, enqueue the donelist, if any. This preserves CB ordering. */
+ if (rsp->orphan_donelist != NULL) {
+ __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
+ rsp->orphan_donetail, ql, qll);
+ ql = qll = 0;
+ rsp->orphan_donelist = NULL;
+ rsp->orphan_donetail = &rsp->orphan_donelist;
+ }
+ if (rsp->orphan_nxtlist != NULL) {
+ __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
+ rsp->orphan_nxttail, ql, qll);
+ ql = qll = 0;
+ rsp->orphan_nxtlist = NULL;
+ rsp->orphan_nxttail = &rsp->orphan_nxtlist;
+ }
+ return 1;
+}
+
+/*
+ * There must be at least one non-no-CBs CPU in operation at any given
+ * time, because no-CBs CPUs are not capable of initiating grace periods
+ * independently. This function therefore complains if the specified
+ * CPU is the last non-no-CBs CPU, allowing the CPU-hotplug system to
+ * avoid offlining the last such CPU. (Recursion is a wonderful thing,
+ * but you have to have a base case!)
+ */
+static bool nocb_cpu_expendable(int cpu)
+{
+ cpumask_var_t non_nocb_cpus;
+ int ret;
+
+ /*
+ * If there are no no-CB CPUs or if this CPU is not a no-CB CPU,
+ * then offlining this CPU is harmless. Let it happen.
+ */
+ if (!have_rcu_nocb_mask || is_nocb_cpu(cpu))
+ return 1;
+
+ /* If no memory, play it safe and keep the CPU around. */
+ if (!alloc_cpumask_var(&non_nocb_cpus, GFP_NOIO))
+ return 0;
+ cpumask_andnot(non_nocb_cpus, cpu_online_mask, rcu_nocb_mask);
+ cpumask_clear_cpu(cpu, non_nocb_cpus);
+ ret = !cpumask_empty(non_nocb_cpus);
+ free_cpumask_var(non_nocb_cpus);
+ return ret;
+}
+
+/*
+ * Helper structure for remote registry of RCU callbacks.
+ * This is needed for when a no-CBs CPU needs to start a grace period.
+ * If it just invokes call_rcu(), the resulting callback will be queued,
+ * which can result in deadlock.
+ */
+struct rcu_head_remote {
+ struct rcu_head *rhp;
+ call_rcu_func_t *crf;
+ void (*func)(struct rcu_head *rhp);
+};
+
+/*
+ * Register a callback as specified by the rcu_head_remote struct.
+ * This function is intended to be invoked via smp_call_function_single().
+ */
+static void call_rcu_local(void *arg)
+{
+ struct rcu_head_remote *rhrp =
+ container_of(arg, struct rcu_head_remote, rhp);
+
+ rhrp->crf(rhrp->rhp, rhrp->func);
+}
+
+/*
+ * Set up an rcu_head_remote structure and then invoke call_rcu_local()
+ * on CPU 0 (which is guaranteed to be a non-no-CBs CPU) via
+ * smp_call_function_single().
+ */
+static void invoke_crf_remote(struct rcu_head *rhp,
+ void (*func)(struct rcu_head *rhp),
+ call_rcu_func_t crf)
+{
+ struct rcu_head_remote rhr;
+
+ rhr.rhp = rhp;
+ rhr.crf = crf;
+ rhr.func = func;
+ smp_call_function_single(0, call_rcu_local, &rhr, 1);
+}
+
+/*
+ * Helper functions to be passed to wait_rcu_gp(), each of which
+ * invokes invoke_crf_remote() to register a callback appropriately.
+ */
+static void __maybe_unused
+call_rcu_preempt_remote(struct rcu_head *rhp,
+ void (*func)(struct rcu_head *rhp))
+{
+ invoke_crf_remote(rhp, func, call_rcu);
+}
+static void call_rcu_bh_remote(struct rcu_head *rhp,
+ void (*func)(struct rcu_head *rhp))
+{
+ invoke_crf_remote(rhp, func, call_rcu_bh);
+}
+static void call_rcu_sched_remote(struct rcu_head *rhp,
+ void (*func)(struct rcu_head *rhp))
+{
+ invoke_crf_remote(rhp, func, call_rcu_sched);
+}
+
+/*
+ * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
+ * callbacks queued by the corresponding no-CBs CPU.
+ */
+static int rcu_nocb_kthread(void *arg)
+{
+ int c, cl;
+ struct rcu_head *list;
+ struct rcu_head *next;
+ struct rcu_head **tail;
+ struct rcu_data *rdp = arg;
+
+ /* Each pass through this loop invokes one batch of callbacks */
+ for (;;) {
+ /* If not polling, wait for next batch of callbacks. */
+ if (!rcu_nocb_poll)
+ wait_event(rdp->nocb_wq, rdp->nocb_head);
+ list = ACCESS_ONCE(rdp->nocb_head);
+ if (!list) {
+ schedule_timeout_interruptible(1);
+ continue;
+ }
+
+ /*
+ * Extract queued callbacks, update counts, and wait
+ * for a grace period to elapse.
+ */
+ ACCESS_ONCE(rdp->nocb_head) = NULL;
+ tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
+ c = atomic_long_xchg(&rdp->nocb_q_count, 0);
+ cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
+ ACCESS_ONCE(rdp->nocb_p_count) += c;
+ ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
+ wait_rcu_gp(rdp->rsp->call_remote);
+
+ /* Each pass through the following loop invokes a callback. */
+ trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
+ c = cl = 0;
+ while (list) {
+ next = list->next;
+ /* Wait for enqueuing to complete, if needed. */
+ while (next == NULL && &list->next != tail) {
+ schedule_timeout_interruptible(1);
+ next = list->next;
+ }
+ debug_rcu_head_unqueue(list);
+ local_bh_disable();
+ if (__rcu_reclaim(rdp->rsp->name, list))
+ cl++;
+ c++;
+ local_bh_enable();
+ list = next;
+ }
+ trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
+ ACCESS_ONCE(rdp->nocb_p_count) -= c;
+ ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
+ rdp->n_nocbs_invoked += c;
+ }
+ return 0;
+}
+
+/* Initialize per-rcu_data variables for no-CBs CPUs. */
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+ rdp->nocb_tail = &rdp->nocb_head;
+ init_waitqueue_head(&rdp->nocb_wq);
+}
+
+/* Create a kthread for each RCU flavor for each no-CBs CPU. */
+static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+{
+ int cpu;
+ struct rcu_data *rdp;
+ struct task_struct *t;
+
+ if (rcu_nocb_mask == NULL)
+ return;
+ for_each_cpu(cpu, rcu_nocb_mask) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
+ BUG_ON(IS_ERR(t));
+ ACCESS_ONCE(rdp->nocb_kthread) = t;
+ }
+}
+
+/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
+static void init_nocb_callback_list(struct rcu_data *rdp)
+{
+ if (rcu_nocb_mask == NULL ||
+ !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
+ return;
+ rdp->nxttail[RCU_NEXT_TAIL] = NULL;
+}
+
+/* Initialize the ->call_remote fields in the rcu_state structures. */
+static void __init rcu_init_nocb(void)
+{
+#ifdef CONFIG_PREEMPT_RCU
+ rcu_preempt_state.call_remote = call_rcu_preempt_remote;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+ rcu_bh_state.call_remote = call_rcu_bh_remote;
+ rcu_sched_state.call_remote = call_rcu_sched_remote;
+}
+
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
+static bool is_nocb_cpu(int cpu)
+{
+ return false;
+}
+
+static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
+ bool lazy)
+{
+ return 0;
+}
+
+static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
+ struct rcu_data *rdp)
+{
+ return 0;
+}
+
+static bool nocb_cpu_expendable(int cpu)
+{
+ return 1;
+}
+
+static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
+{
+}
+
+static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
+{
+}
+
+static void init_nocb_callback_list(struct rcu_data *rdp)
+{
+}
+
+static void __init rcu_init_nocb(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
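
The nocb queue manipulated by __call_rcu_nocb_enqueue() and drained in rcu_nocb_kthread() is a classic multi-producer, single-consumer linked list: producers xchg() the tail pointer and then store through the old tail, while the kthread detaches the whole list by resetting the head and swapping the tail back. The following is a simplified userspace sketch (not the kernel code) using C11 atomics in place of xchg()/ACCESS_ONCE(), and ignoring the partially-linked-element case that the kthread's inner wait loop handles:

#include <stdatomic.h>
#include <stddef.h>

struct cb {
        struct cb *next;
};

struct cb_queue {
        struct cb *head;
        _Atomic(struct cb **) tail;     /* points at the last ->next (or &head) */
};

static void queue_init(struct cb_queue *q)
{
        q->head = NULL;
        atomic_store(&q->tail, &q->head);
}

/* Producer side, cf. __call_rcu_nocb_enqueue(). */
static void queue_push(struct cb_queue *q, struct cb *cbp)
{
        struct cb **old_tail;

        cbp->next = NULL;
        old_tail = atomic_exchange(&q->tail, &cbp->next);
        *old_tail = cbp;                /* publish to the consumer */
}

/* Consumer side, cf. the head/tail handling in rcu_nocb_kthread(). */
static struct cb *queue_pop_all(struct cb_queue *q, struct cb ***tailp)
{
        struct cb *list = q->head;      /* single consumer owns the head */

        q->head = NULL;
        *tailp = atomic_exchange(&q->tail, &q->head);
        /*
         * A producer may have swapped the tail but not yet linked its
         * element; the kernel waits for ->next to become non-NULL before
         * following it (see the inner loop in rcu_nocb_kthread()).
         */
        return list;
}
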
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 693513bc50e6..0d095dcaa670 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,29 +46,58 @@
#define RCU_TREE_NONCORE
#include "rcutree.h"
-static int show_rcubarrier(struct seq_file *m, void *unused)
+#define ulong2long(a) (*(long *)(&(a)))
+
+static int r_open(struct inode *inode, struct file *file,
+ const struct seq_operations *op)
{
- struct rcu_state *rsp;
+ int ret = seq_open(file, op);
+ if (!ret) {
+ struct seq_file *m = (struct seq_file *)file->private_data;
+ m->private = inode->i_private;
+ }
+ return ret;
+}
+
+static void *r_start(struct seq_file *m, loff_t *pos)
+{
+ struct rcu_state *rsp = (struct rcu_state *)m->private;
+ *pos = cpumask_next(*pos - 1, cpu_possible_mask);
+ if ((*pos) < nr_cpu_ids)
+ return per_cpu_ptr(rsp->rda, *pos);
+ return NULL;
+}
- for_each_rcu_flavor(rsp)
- seq_printf(m, "%s: bcc: %d nbd: %lu\n",
- rsp->name,
- atomic_read(&rsp->barrier_cpu_count),
- rsp->n_barrier_done);
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return r_start(m, pos);
+}
+
+static void r_stop(struct seq_file *m, void *v)
+{
+}
+
+static int show_rcubarrier(struct seq_file *m, void *v)
+{
+ struct rcu_state *rsp = (struct rcu_state *)m->private;
+ seq_printf(m, "bcc: %d nbd: %lu\n",
+ atomic_read(&rsp->barrier_cpu_count),
+ rsp->n_barrier_done);
return 0;
}
static int rcubarrier_open(struct inode *inode, struct file *file)
{
- return single_open(file, show_rcubarrier, NULL);
+ return single_open(file, show_rcubarrier, inode->i_private);
}
static const struct file_operations rcubarrier_fops = {
.owner = THIS_MODULE,
.open = rcubarrier_open,
.read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .llseek = no_llseek,
+ .release = seq_release,
};
#ifdef CONFIG_RCU_BOOST
@@ -84,12 +113,14 @@ static char convert_kthread_status(unsigned int kthread_status)
static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
{
+ long ql, qll;
+
if (!rdp->beenonline)
return;
- seq_printf(m, "%3d%cc=%lu g=%lu pq=%d qp=%d",
+ seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? '!' : ' ',
- rdp->completed, rdp->gpnum,
+ ulong2long(rdp->completed), ulong2long(rdp->gpnum),
rdp->passed_quiesce, rdp->qs_pending);
seq_printf(m, " dt=%d/%llx/%d df=%lu",
atomic_read(&rdp->dynticks->dynticks),
@@ -97,8 +128,11 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->dynticks->dynticks_nmi_nesting,
rdp->dynticks_fqs);
seq_printf(m, " of=%lu", rdp->offline_fqs);
+ rcu_nocb_q_lengths(rdp, &ql, &qll);
+ qll += rdp->qlen_lazy;
+ ql += rdp->qlen;
seq_printf(m, " ql=%ld/%ld qs=%c%c%c%c",
- rdp->qlen_lazy, rdp->qlen,
+ qll, ql,
".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
rdp->nxttail[RCU_NEXT_TAIL]],
".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
@@ -114,101 +148,67 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
#endif /* #ifdef CONFIG_RCU_BOOST */
seq_printf(m, " b=%ld", rdp->blimit);
- seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
- rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
+ seq_printf(m, " ci=%lu nci=%lu co=%lu ca=%lu\n",
+ rdp->n_cbs_invoked, rdp->n_nocbs_invoked,
+ rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
-static int show_rcudata(struct seq_file *m, void *unused)
+static int show_rcudata(struct seq_file *m, void *v)
{
- int cpu;
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp) {
- seq_printf(m, "%s:\n", rsp->name);
- for_each_possible_cpu(cpu)
- print_one_rcu_data(m, per_cpu_ptr(rsp->rda, cpu));
- }
+ print_one_rcu_data(m, (struct rcu_data *)v);
return 0;
}
+static const struct seq_operations rcudate_op = {
+ .start = r_start,
+ .next = r_next,
+ .stop = r_stop,
+ .show = show_rcudata,
+};
+
static int rcudata_open(struct inode *inode, struct file *file)
{
- return single_open(file, show_rcudata, NULL);
+ return r_open(inode, file, &rcudate_op);
}
static const struct file_operations rcudata_fops = {
.owner = THIS_MODULE,
.open = rcudata_open,
.read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .llseek = no_llseek,
+ .release = seq_release,
};
-static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
-{
- if (!rdp->beenonline)
- return;
- seq_printf(m, "%d,%s,%lu,%lu,%d,%d",
- rdp->cpu,
- cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"",
- rdp->completed, rdp->gpnum,
- rdp->passed_quiesce, rdp->qs_pending);
- seq_printf(m, ",%d,%llx,%d,%lu",
- atomic_read(&rdp->dynticks->dynticks),
- rdp->dynticks->dynticks_nesting,
- rdp->dynticks->dynticks_nmi_nesting,
- rdp->dynticks_fqs);
- seq_printf(m, ",%lu", rdp->offline_fqs);
- seq_printf(m, ",%ld,%ld,\"%c%c%c%c\"", rdp->qlen_lazy, rdp->qlen,
- ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
- rdp->nxttail[RCU_NEXT_TAIL]],
- ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
- rdp->nxttail[RCU_NEXT_READY_TAIL]],
- ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
- rdp->nxttail[RCU_WAIT_TAIL]],
- ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
-#ifdef CONFIG_RCU_BOOST
- seq_printf(m, ",%d,\"%c\"",
- per_cpu(rcu_cpu_has_work, rdp->cpu),
- convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
- rdp->cpu)));
-#endif /* #ifdef CONFIG_RCU_BOOST */
- seq_printf(m, ",%ld", rdp->blimit);
- seq_printf(m, ",%lu,%lu,%lu\n",
- rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
-}
-
-static int show_rcudata_csv(struct seq_file *m, void *unused)
+static int show_rcuexp(struct seq_file *m, void *v)
{
- int cpu;
- struct rcu_state *rsp;
-
- seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pq\",");
- seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
- seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\"");
-#ifdef CONFIG_RCU_BOOST
- seq_puts(m, "\"kt\",\"ktl\"");
-#endif /* #ifdef CONFIG_RCU_BOOST */
- seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
- for_each_rcu_flavor(rsp) {
- seq_printf(m, "\"%s:\"\n", rsp->name);
- for_each_possible_cpu(cpu)
- print_one_rcu_data_csv(m, per_cpu_ptr(rsp->rda, cpu));
- }
+ struct rcu_state *rsp = (struct rcu_state *)m->private;
+
+ seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
+ atomic_long_read(&rsp->expedited_start),
+ atomic_long_read(&rsp->expedited_done),
+ atomic_long_read(&rsp->expedited_wrap),
+ atomic_long_read(&rsp->expedited_tryfail),
+ atomic_long_read(&rsp->expedited_workdone1),
+ atomic_long_read(&rsp->expedited_workdone2),
+ atomic_long_read(&rsp->expedited_normal),
+ atomic_long_read(&rsp->expedited_stoppedcpus),
+ atomic_long_read(&rsp->expedited_done_tries),
+ atomic_long_read(&rsp->expedited_done_lost),
+ atomic_long_read(&rsp->expedited_done_exit));
return 0;
}
-static int rcudata_csv_open(struct inode *inode, struct file *file)
+static int rcuexp_open(struct inode *inode, struct file *file)
{
- return single_open(file, show_rcudata_csv, NULL);
+ return single_open(file, show_rcuexp, inode->i_private);
}
-static const struct file_operations rcudata_csv_fops = {
+static const struct file_operations rcuexp_fops = {
.owner = THIS_MODULE,
- .open = rcudata_csv_open,
+ .open = rcuexp_open,
.read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .llseek = no_llseek,
+ .release = seq_release,
};
#ifdef CONFIG_RCU_BOOST
@@ -254,27 +254,11 @@ static const struct file_operations rcu_node_boost_fops = {
.owner = THIS_MODULE,
.open = rcu_node_boost_open,
.read = seq_read,
- .llseek = seq_lseek,
+ .llseek = no_llseek,
.release = single_release,
};
-/*
- * Create the rcuboost debugfs entry. Standard error return.
- */
-static int rcu_boost_trace_create_file(struct dentry *rcudir)
-{
- return !debugfs_create_file("rcuboost", 0444, rcudir, NULL,
- &rcu_node_boost_fops);
-}
-
-#else /* #ifdef CONFIG_RCU_BOOST */
-
-static int rcu_boost_trace_create_file(struct dentry *rcudir)
-{
- return 0; /* There cannot be an error if we didn't create it! */
-}
-
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
+#endif /* #ifdef CONFIG_RCU_BOOST */
static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
{
@@ -283,8 +267,9 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
struct rcu_node *rnp;
gpnum = rsp->gpnum;
- seq_printf(m, "%s: c=%lu g=%lu s=%d jfq=%ld j=%x ",
- rsp->name, rsp->completed, gpnum, rsp->fqs_state,
+ seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x ",
+ ulong2long(rsp->completed), ulong2long(gpnum),
+ rsp->fqs_state,
(long)(rsp->jiffies_force_qs - jiffies),
(int)(jiffies & 0xffff));
seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
@@ -306,26 +291,24 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
seq_puts(m, "\n");
}
-static int show_rcuhier(struct seq_file *m, void *unused)
+static int show_rcuhier(struct seq_file *m, void *v)
{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- print_one_rcu_state(m, rsp);
+ struct rcu_state *rsp = (struct rcu_state *)m->private;
+ print_one_rcu_state(m, rsp);
return 0;
}
static int rcuhier_open(struct inode *inode, struct file *file)
{
- return single_open(file, show_rcuhier, NULL);
+ return single_open(file, show_rcuhier, inode->i_private);
}
static const struct file_operations rcuhier_fops = {
.owner = THIS_MODULE,
.open = rcuhier_open,
.read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .llseek = no_llseek,
+ .release = seq_release,
};
static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
@@ -338,42 +321,42 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
struct rcu_node *rnp = &rsp->node[0];
raw_spin_lock_irqsave(&rnp->lock, flags);
- completed = rsp->completed;
- gpnum = rsp->gpnum;
- if (rsp->completed == rsp->gpnum)
+ completed = ACCESS_ONCE(rsp->completed);
+ gpnum = ACCESS_ONCE(rsp->gpnum);
+ if (completed == gpnum)
gpage = 0;
else
gpage = jiffies - rsp->gp_start;
gpmax = rsp->gp_max;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- seq_printf(m, "%s: completed=%ld gpnum=%lu age=%ld max=%ld\n",
- rsp->name, completed, gpnum, gpage, gpmax);
+ seq_printf(m, "completed=%ld gpnum=%ld age=%ld max=%ld\n",
+ ulong2long(completed), ulong2long(gpnum), gpage, gpmax);
}
-static int show_rcugp(struct seq_file *m, void *unused)
+static int show_rcugp(struct seq_file *m, void *v)
{
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp)
- show_one_rcugp(m, rsp);
+ struct rcu_state *rsp = (struct rcu_state *)m->private;
+ show_one_rcugp(m, rsp);
return 0;
}
static int rcugp_open(struct inode *inode, struct file *file)
{
- return single_open(file, show_rcugp, NULL);
+ return single_open(file, show_rcugp, inode->i_private);
}
static const struct file_operations rcugp_fops = {
.owner = THIS_MODULE,
.open = rcugp_open,
.read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .llseek = no_llseek,
+ .release = seq_release,
};
static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
{
+ if (!rdp->beenonline)
+ return;
seq_printf(m, "%3d%cnp=%ld ",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? '!' : ' ',
@@ -389,34 +372,30 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
rdp->n_rp_need_nothing);
}
-static int show_rcu_pending(struct seq_file *m, void *unused)
+static int show_rcu_pending(struct seq_file *m, void *v)
{
- int cpu;
- struct rcu_data *rdp;
- struct rcu_state *rsp;
-
- for_each_rcu_flavor(rsp) {
- seq_printf(m, "%s:\n", rsp->name);
- for_each_possible_cpu(cpu) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
- if (rdp->beenonline)
- print_one_rcu_pending(m, rdp);
- }
- }
+ print_one_rcu_pending(m, (struct rcu_data *)v);
return 0;
}
+static const struct seq_operations rcu_pending_op = {
+ .start = r_start,
+ .next = r_next,
+ .stop = r_stop,
+ .show = show_rcu_pending,
+};
+
static int rcu_pending_open(struct inode *inode, struct file *file)
{
- return single_open(file, show_rcu_pending, NULL);
+ return r_open(inode, file, &rcu_pending_op);
}
static const struct file_operations rcu_pending_fops = {
.owner = THIS_MODULE,
.open = rcu_pending_open,
.read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .llseek = no_llseek,
+ .release = seq_release,
};
static int show_rcutorture(struct seq_file *m, void *unused)
@@ -446,43 +425,58 @@ static struct dentry *rcudir;
static int __init rcutree_trace_init(void)
{
+ struct rcu_state *rsp;
struct dentry *retval;
+ struct dentry *rspdir;
rcudir = debugfs_create_dir("rcu", NULL);
if (!rcudir)
goto free_out;
- retval = debugfs_create_file("rcubarrier", 0444, rcudir,
- NULL, &rcubarrier_fops);
- if (!retval)
- goto free_out;
-
- retval = debugfs_create_file("rcudata", 0444, rcudir,
- NULL, &rcudata_fops);
- if (!retval)
- goto free_out;
-
- retval = debugfs_create_file("rcudata.csv", 0444, rcudir,
- NULL, &rcudata_csv_fops);
- if (!retval)
- goto free_out;
-
- if (rcu_boost_trace_create_file(rcudir))
- goto free_out;
+ for_each_rcu_flavor(rsp) {
+ rspdir = debugfs_create_dir(rsp->name, rcudir);
+ if (!rspdir)
+ goto free_out;
+
+ retval = debugfs_create_file("rcudata", 0444,
+ rspdir, rsp, &rcudata_fops);
+ if (!retval)
+ goto free_out;
+
+ retval = debugfs_create_file("rcuexp", 0444,
+ rspdir, rsp, &rcuexp_fops);
+ if (!retval)
+ goto free_out;
+
+ retval = debugfs_create_file("rcu_pending", 0444,
+ rspdir, rsp, &rcu_pending_fops);
+ if (!retval)
+ goto free_out;
+
+ retval = debugfs_create_file("rcubarrier", 0444,
+ rspdir, rsp, &rcubarrier_fops);
+ if (!retval)
+ goto free_out;
- retval = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
- if (!retval)
- goto free_out;
+#ifdef CONFIG_RCU_BOOST
+ if (rsp == &rcu_preempt_state) {
+ retval = debugfs_create_file("rcuboost", 0444,
+ rspdir, NULL, &rcu_node_boost_fops);
+ if (!retval)
+ goto free_out;
+ }
+#endif
- retval = debugfs_create_file("rcuhier", 0444, rcudir,
- NULL, &rcuhier_fops);
- if (!retval)
- goto free_out;
+ retval = debugfs_create_file("rcugp", 0444,
+ rspdir, rsp, &rcugp_fops);
+ if (!retval)
+ goto free_out;
- retval = debugfs_create_file("rcu_pending", 0444, rcudir,
- NULL, &rcu_pending_fops);
- if (!retval)
- goto free_out;
+ retval = debugfs_create_file("rcuhier", 0444,
+ rspdir, rsp, &rcuhier_fops);
+ if (!retval)
+ goto free_out;
+ }
retval = debugfs_create_file("rcutorture", 0444, rcudir,
NULL, &rcutorture_fops);
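
The rcutree_trace.c rework above hangs the relevant rcu_state off each debugfs inode (the i_private argument) so that a single set of file operations can serve one file per flavor. A minimal, hypothetical kernel-module-style sketch of that pattern, using the same single_open()/i_private plumbing shown in the patch:

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct my_stats {                       /* hypothetical per-instance data */
        const char *name;
        unsigned long events;
};

static int my_stats_show(struct seq_file *m, void *unused)
{
        struct my_stats *st = m->private;       /* set up by single_open() */

        seq_printf(m, "%s: events=%lu\n", st->name, st->events);
        return 0;
}

static int my_stats_open(struct inode *inode, struct file *file)
{
        /* Hand the per-instance pointer stored in i_private to the show routine. */
        return single_open(file, my_stats_show, inode->i_private);
}

static const struct file_operations my_stats_fops = {
        .owner          = THIS_MODULE,
        .open           = my_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/*
 * Creation, once per instance:
 *      debugfs_create_file("stats", 0444, parent_dir, &instance, &my_stats_fops);
 */
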
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2d8927fda712..f5066a61f971 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -72,6 +72,7 @@
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/binfmts.h>
+#include <linux/context_tracking.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -952,6 +953,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
+ if (p->sched_class->migrate_task_rq)
+ p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
}
@@ -1524,6 +1527,15 @@ static void __sched_fork(struct task_struct *p)
p->se.vruntime = 0;
INIT_LIST_HEAD(&p->se.group_node);
+/*
+ * Load tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
+ * may be removed once load tracking is useful for purposes beyond shares
+ * distribution (e.g. load balancing).
+ */
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+ p->se.avg.runnable_avg_period = 0;
+ p->se.avg.runnable_avg_sum = 0;
+#endif
#ifdef CONFIG_SCHEDSTATS
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
@@ -1886,8 +1898,8 @@ context_switch(struct rq *rq, struct task_struct *prev,
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif
+ context_tracking_task_switch(prev, next);
/* Here we just switch the register state and the stack. */
- rcu_switch(prev, next);
switch_to(prev, next, prev);
barrier();
@@ -2911,7 +2923,7 @@ asmlinkage void __sched schedule(void)
}
EXPORT_SYMBOL(schedule);
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_CONTEXT_TRACKING
asmlinkage void __sched schedule_user(void)
{
/*
@@ -2920,9 +2932,9 @@ asmlinkage void __sched schedule_user(void)
* we haven't yet exited the RCU idle mode. Do it here manually until
* we find a better solution.
*/
- rcu_user_exit();
+ user_exit();
schedule();
- rcu_user_enter();
+ user_enter();
}
#endif
@@ -3027,7 +3039,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
/* Catch callers which need to be fixed */
BUG_ON(ti->preempt_count || !irqs_disabled());
- rcu_user_exit();
+ user_exit();
do {
add_preempt_count(PREEMPT_ACTIVE);
local_irq_enable();
@@ -4474,6 +4486,7 @@ static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
void sched_show_task(struct task_struct *p)
{
unsigned long free = 0;
+ int ppid;
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
@@ -4493,8 +4506,11 @@ void sched_show_task(struct task_struct *p)
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
#endif
+ rcu_read_lock();
+ ppid = task_pid_nr(rcu_dereference(p->real_parent));
+ rcu_read_unlock();
printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
- task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
+ task_pid_nr(p), ppid,
(unsigned long)task_thread_info(p)->flags);
show_stack(p, NULL);
@@ -8076,3 +8092,9 @@ struct cgroup_subsys cpuacct_subsys = {
.base_cftypes = files,
};
#endif /* CONFIG_CGROUP_CPUACCT */
+
+void dump_cpu_task(int cpu)
+{
+ pr_info("Task dump for CPU %d:\n", cpu);
+ sched_show_task(cpu_curr(cpu));
+}
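
The sched_show_task() hunk above illustrates a general RCU rule: rcu_dereference() is only legal inside an RCU read-side critical section, so the value derived from it is snapshotted before the lock is dropped. A minimal sketch of that idiom (the helper name is hypothetical):

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Snapshot the parent's PID under RCU protection. */
static int task_ppid_sketch(struct task_struct *p)
{
        int ppid;

        rcu_read_lock();
        ppid = task_pid_nr(rcu_dereference(p->real_parent));
        rcu_read_unlock();

        return ppid;
}
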
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 81b763ba58a6..293b202fcf79 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
* Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
*/
-void vtime_account(struct task_struct *curr)
+void irqtime_account_irq(struct task_struct *curr)
{
unsigned long flags;
s64 delta;
@@ -73,7 +73,7 @@ void vtime_account(struct task_struct *curr)
irq_time_write_end();
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(irqtime_account_irq);
static int irqtime_account_hi_update(void)
{
@@ -288,6 +288,34 @@ static __always_inline bool steal_account_process_tick(void)
return false;
}
+/*
+ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
+ * tasks (sum on group iteration) belonging to @tsk's group.
+ */
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+ struct signal_struct *sig = tsk->signal;
+ struct task_struct *t;
+
+ times->utime = sig->utime;
+ times->stime = sig->stime;
+ times->sum_exec_runtime = sig->sum_sched_runtime;
+
+ rcu_read_lock();
+ /* make sure we can trust tsk->thread_group list */
+ if (!likely(pid_alive(tsk)))
+ goto out;
+
+ t = tsk;
+ do {
+ times->utime += t->utime;
+ times->stime += t->stime;
+ times->sum_exec_runtime += task_sched_runtime(t);
+ } while_each_thread(tsk, t);
+out:
+ rcu_read_unlock();
+}
+
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -417,13 +445,13 @@ void account_idle_ticks(unsigned long ticks)
* Use precise platform statistics if available:
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
*ut = p->utime;
*st = p->stime;
}
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
struct task_cputime cputime;
@@ -433,6 +461,29 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
*st = cputime.stime;
}
+void vtime_account_system_irqsafe(struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ vtime_account_system(tsk);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
+
+#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
+void vtime_task_switch(struct task_struct *prev)
+{
+ if (is_idle_task(prev))
+ vtime_account_idle(prev);
+ else
+ vtime_account_system(prev);
+
+ vtime_account_user(prev);
+ arch_vtime_task_switch(prev);
+}
+#endif
+
/*
* Archs that account the whole time spent in the idle task
* (outside irq) as idle time can rely on this and just implement
@@ -444,16 +495,10 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account(struct task_struct *tsk)
{
- unsigned long flags;
-
- local_irq_save(flags);
-
if (in_interrupt() || !is_idle_task(tsk))
vtime_account_system(tsk);
else
vtime_account_idle(tsk);
-
- local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(vtime_account);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
@@ -478,14 +523,30 @@ static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
return (__force cputime_t) temp;
}
-void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+/*
+ * Adjust the imprecise, tick based cputime samples against the precise
+ * runtime accounted by the scheduler.
+ */
+static void cputime_adjust(struct task_cputime *curr,
+ struct cputime *prev,
+ cputime_t *ut, cputime_t *st)
{
- cputime_t rtime, utime = p->utime, total = utime + p->stime;
+ cputime_t rtime, utime, total;
+
+ utime = curr->utime;
+ total = utime + curr->stime;
/*
- * Use CFS's precise accounting:
+	 * Tick based cputime accounting depends on whether a task's random
+	 * scheduling timeslices happen to be interrupted by the timer.
+	 * Depending on these circumstances, the number of such interrupts may
+	 * over- or under-estimate the real user and system cputime, so the
+	 * tick counts only track them with variable precision.
+ *
+ * Fix this by scaling these tick based values against the total
+ * runtime accounted by the CFS scheduler.
*/
- rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
+ rtime = nsecs_to_cputime(curr->sum_exec_runtime);
if (total)
utime = scale_utime(utime, rtime, total);
@@ -493,38 +554,36 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
utime = rtime;
/*
- * Compare with previous values, to keep monotonicity:
+ * If the tick based count grows faster than the scheduler one,
+ * the result of the scaling may go backward.
+ * Let's enforce monotonicity.
*/
- p->prev_utime = max(p->prev_utime, utime);
- p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+ prev->utime = max(prev->utime, utime);
+ prev->stime = max(prev->stime, rtime - prev->utime);
- *ut = p->prev_utime;
- *st = p->prev_stime;
+ *ut = prev->utime;
+ *st = prev->stime;
+}
+
+void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ struct task_cputime cputime = {
+ .utime = p->utime,
+ .stime = p->stime,
+ .sum_exec_runtime = p->se.sum_exec_runtime,
+ };
+
+ cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
/*
* Must be called with siglock held.
*/
-void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
- struct signal_struct *sig = p->signal;
struct task_cputime cputime;
- cputime_t rtime, utime, total;
thread_group_cputime(p, &cputime);
-
- total = cputime.utime + cputime.stime;
- rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
-
- if (total)
- utime = scale_utime(cputime.utime, rtime, total);
- else
- utime = rtime;
-
- sig->prev_utime = max(sig->prev_utime, utime);
- sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
-
- *ut = sig->prev_utime;
- *st = sig->prev_stime;
+ cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 6f79596e0ea9..2cd3c1b4e582 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -61,14 +61,20 @@ static unsigned long nsec_low(unsigned long long nsec)
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
struct sched_entity *se = tg->se[cpu];
- if (!se)
- return;
#define P(F) \
SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
+ if (!se) {
+ struct sched_avg *avg = &cpu_rq(cpu)->avg;
+ P(avg->runnable_avg_sum);
+ P(avg->runnable_avg_period);
+ return;
+ }
+
+
PN(se->exec_start);
PN(se->vruntime);
PN(se->sum_exec_runtime);
@@ -85,6 +91,12 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
P(se->statistics.wait_count);
#endif
P(se->load.weight);
+#ifdef CONFIG_SMP
+ P(se->avg.runnable_avg_sum);
+ P(se->avg.runnable_avg_period);
+ P(se->avg.load_avg_contrib);
+ P(se->avg.decay_count);
+#endif
#undef PN
#undef P
}
@@ -206,14 +218,18 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_avg",
- SPLIT_NS(cfs_rq->load_avg));
- SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_period",
- SPLIT_NS(cfs_rq->load_period));
- SEQ_printf(m, " .%-30s: %ld\n", "load_contrib",
- cfs_rq->load_contribution);
- SEQ_printf(m, " .%-30s: %d\n", "load_tg",
- atomic_read(&cfs_rq->tg->load_weight));
+ SEQ_printf(m, " .%-30s: %lld\n", "runnable_load_avg",
+ cfs_rq->runnable_load_avg);
+ SEQ_printf(m, " .%-30s: %lld\n", "blocked_load_avg",
+ cfs_rq->blocked_load_avg);
+ SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
+ atomic64_read(&cfs_rq->tg->load_avg));
+ SEQ_printf(m, " .%-30s: %lld\n", "tg_load_contrib",
+ cfs_rq->tg_load_contrib);
+ SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib",
+ cfs_rq->tg_runnable_contrib);
+ SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg",
+ atomic_read(&cfs_rq->tg->runnable_avg));
#endif
print_cfs_group_stats(m, cpu, cfs_rq->tg);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6b800a14b990..59e072b2db97 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -259,6 +259,9 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
return grp->my_q;
}
+static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
+ int force_update);
+
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (!cfs_rq->on_list) {
@@ -278,6 +281,8 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
}
cfs_rq->on_list = 1;
+ /* We should have no load, but we need to update last_decay. */
+ update_cfs_rq_blocked_load(cfs_rq, 0);
}
}
@@ -653,9 +658,6 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
-static void update_cfs_shares(struct cfs_rq *cfs_rq);
-
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -675,10 +677,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->vruntime += delta_exec_weighted;
update_min_vruntime(cfs_rq);
-
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
- cfs_rq->load_unacc_exec_time += delta_exec;
-#endif
}
static void update_curr(struct cfs_rq *cfs_rq)
@@ -801,72 +799,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-/* we need this in update_cfs_load and load-balance functions below */
-static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
# ifdef CONFIG_SMP
-static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
- int global_update)
-{
- struct task_group *tg = cfs_rq->tg;
- long load_avg;
-
- load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
- load_avg -= cfs_rq->load_contribution;
-
- if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
- atomic_add(load_avg, &tg->load_weight);
- cfs_rq->load_contribution += load_avg;
- }
-}
-
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
-{
- u64 period = sysctl_sched_shares_window;
- u64 now, delta;
- unsigned long load = cfs_rq->load.weight;
-
- if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
- return;
-
- now = rq_of(cfs_rq)->clock_task;
- delta = now - cfs_rq->load_stamp;
-
- /* truncate load history at 4 idle periods */
- if (cfs_rq->load_stamp > cfs_rq->load_last &&
- now - cfs_rq->load_last > 4 * period) {
- cfs_rq->load_period = 0;
- cfs_rq->load_avg = 0;
- delta = period - 1;
- }
-
- cfs_rq->load_stamp = now;
- cfs_rq->load_unacc_exec_time = 0;
- cfs_rq->load_period += delta;
- if (load) {
- cfs_rq->load_last = now;
- cfs_rq->load_avg += delta * load;
- }
-
- /* consider updating load contribution on each fold or truncate */
- if (global_update || cfs_rq->load_period > period
- || !cfs_rq->load_period)
- update_cfs_rq_load_contribution(cfs_rq, global_update);
-
- while (cfs_rq->load_period > period) {
- /*
- * Inline assembly required to prevent the compiler
- * optimising this loop into a divmod call.
- * See __iter_div_u64_rem() for another example of this.
- */
- asm("" : "+rm" (cfs_rq->load_period));
- cfs_rq->load_period /= 2;
- cfs_rq->load_avg /= 2;
- }
-
- if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
- list_del_leaf_cfs_rq(cfs_rq);
-}
-
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
long tg_weight;
@@ -876,8 +809,8 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
* to gain a more accurate current total weight. See
* update_cfs_rq_load_contribution().
*/
- tg_weight = atomic_read(&tg->load_weight);
- tg_weight -= cfs_rq->load_contribution;
+ tg_weight = atomic64_read(&tg->load_avg);
+ tg_weight -= cfs_rq->tg_load_contrib;
tg_weight += cfs_rq->load.weight;
return tg_weight;
@@ -901,27 +834,11 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
return shares;
}
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
- if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
- update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq);
- }
-}
# else /* CONFIG_SMP */
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
-{
-}
-
static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
return tg->shares;
}
-
-static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-}
# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
@@ -939,6 +856,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
account_entity_enqueue(cfs_rq, se);
}
+static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
+
static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
struct task_group *tg;
@@ -958,18 +877,478 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq)
reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
-static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}
+#endif /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
+/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+/*
+ * We choose a half-life close to 1 scheduling period.
+ * Note: The tables below are dependent on this value.
+ */
+#define LOAD_AVG_PERIOD 32
+#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
+#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
+
+/* Precomputed fixed inverse multiplies for multiplication by y^n */
+static const u32 runnable_avg_yN_inv[] = {
+ 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
+ 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
+ 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
+ 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
+ 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
+ 0x85aac367, 0x82cd8698,
+};
+
+/*
+ * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
+ * over-estimates when re-combining.
+ */
+static const u32 runnable_avg_yN_sum[] = {
+ 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
+ 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
+ 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
+};
+
+/*
+ * Approximate:
+ * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
+ */
+static __always_inline u64 decay_load(u64 val, u64 n)
{
+ unsigned int local_n;
+
+ if (!n)
+ return val;
+ else if (unlikely(n > LOAD_AVG_PERIOD * 63))
+ return 0;
+
+ /* after bounds checking we can collapse to 32-bit */
+ local_n = n;
+
+ /*
+ * As y^PERIOD = 1/2, we can combine
+ * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
+ * With a look-up table which covers y^n (n < PERIOD)
+ *
+ * To achieve constant time decay_load.
+ */
+ if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
+ val >>= local_n / LOAD_AVG_PERIOD;
+ local_n %= LOAD_AVG_PERIOD;
+ }
+
+ val *= runnable_avg_yN_inv[local_n];
+ /* We don't use SRR here since we always want to round down. */
+ return val >> 32;
}
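
As a rough sanity check, the fixed-point decay above can be exercised in user space. The sketch below copies the table verbatim but omits the bounds handling of the real decay_load(); it is illustrative only, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define LOAD_AVG_PERIOD 32

static const uint32_t yN_inv[] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
	0x85aac367, 0x82cd8698,
};

/* val * y^n: whole half-life periods become a shift, the remainder a
 * single fixed-point multiply from the table. */
static uint64_t decay(uint64_t val, unsigned int n)
{
	val >>= n / LOAD_AVG_PERIOD;
	n %= LOAD_AVG_PERIOD;
	return (val * yN_inv[n]) >> 32;
}

int main(void)
{
	/* one full period halves the value (round-down gives 511) */
	printf("decay(1024, 32) = %llu\n", (unsigned long long)decay(1024, 32));
	/* two full periods quarter it (255) */
	printf("decay(1024, 64) = %llu\n", (unsigned long long)decay(1024, 64));
	return 0;
}
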
-static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+/*
+ * For updates fully spanning n periods, the contribution to runnable
+ * average will be: \Sum 1024*y^k { 1<=k<=n }
+ *
+ * We can compute this reasonably efficiently by combining:
+ * y^PERIOD = 1/2 with precomputed \Sum 1024*y^k { 1<=k<=n } for n <= PERIOD
+ */
+static u32 __compute_runnable_contrib(u64 n)
{
+ u32 contrib = 0;
+
+ if (likely(n <= LOAD_AVG_PERIOD))
+ return runnable_avg_yN_sum[n];
+ else if (unlikely(n >= LOAD_AVG_MAX_N))
+ return LOAD_AVG_MAX;
+
+ /* Compute \Sum y^k by combining precomputed values for y^i and \Sum y^j */
+ do {
+ contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
+ contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
+
+ n -= LOAD_AVG_PERIOD;
+ } while (n > LOAD_AVG_PERIOD);
+
+ contrib = decay_load(contrib, n);
+ return contrib + runnable_avg_yN_sum[n];
}
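
The do/while above relies on the identity below (with P = LOAD_AVG_PERIOD, y^P = 1/2 and S_P = runnable_avg_yN_sum[P]); each iteration therefore just halves the running total and adds the precomputed one-period sum:

    \sum_{k=1}^{n} 1024\, y^{k}
      \;=\; \sum_{k=1}^{P} 1024\, y^{k} \;+\; y^{P}\!\sum_{k=1}^{n-P} 1024\, y^{k}
      \;=\; S_P \;+\; \tfrac{1}{2}\sum_{k=1}^{n-P} 1024\, y^{k}
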
-#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+/*
+ * We can represent the historical contribution to runnable average as the
+ * coefficients of a geometric series. To do this we sub-divide our runnable
+ * history into segments of approximately 1ms (1024us); label the segment that
+ * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
+ *
+ * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
+ * p0 p1 p2
+ * (now) (~1ms ago) (~2ms ago)
+ *
+ * Let u_i denote the fraction of p_i that the entity was runnable.
+ *
+ * We then designate the fractions u_i as our coefficients, yielding the
+ * following representation of historical load:
+ * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
+ *
+ * We choose y based on the width of a reasonable scheduling period, fixing:
+ * y^32 = 0.5
+ *
+ * This means that the contribution to load ~32ms ago (u_32) will be weighted
+ * approximately half as much as the contribution to load within the last ms
+ * (u_0).
+ *
+ * When a period "rolls over" and we have new u_0`, multiplying the previous
+ * sum again by y is sufficient to update:
+ * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
+ * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
+ */
+static __always_inline int __update_entity_runnable_avg(u64 now,
+ struct sched_avg *sa,
+ int runnable)
+{
+ u64 delta, periods;
+ u32 runnable_contrib;
+ int delta_w, decayed = 0;
+
+ delta = now - sa->last_runnable_update;
+ /*
+ * This should only happen when time goes backwards, which it
+ * unfortunately does during sched clock init when we swap over to TSC.
+ */
+ if ((s64)delta < 0) {
+ sa->last_runnable_update = now;
+ return 0;
+ }
+
+ /*
+ * Use 1024ns as the unit of measurement since it's a reasonable
+ * approximation of 1us and fast to compute.
+ */
+ delta >>= 10;
+ if (!delta)
+ return 0;
+ sa->last_runnable_update = now;
+
+ /* delta_w is the amount already accumulated against our next period */
+ delta_w = sa->runnable_avg_period % 1024;
+ if (delta + delta_w >= 1024) {
+ /* period roll-over */
+ decayed = 1;
+
+ /*
+ * Now that we know we're crossing a period boundary, figure
+ * out how much from delta we need to complete the current
+ * period and accrue it.
+ */
+ delta_w = 1024 - delta_w;
+ if (runnable)
+ sa->runnable_avg_sum += delta_w;
+ sa->runnable_avg_period += delta_w;
+
+ delta -= delta_w;
+
+ /* Figure out how many additional periods this update spans */
+ periods = delta / 1024;
+ delta %= 1024;
+
+ sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
+ periods + 1);
+ sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
+ periods + 1);
+
+ /* Efficiently calculate \sum (1..n_period) 1024*y^i */
+ runnable_contrib = __compute_runnable_contrib(periods);
+ if (runnable)
+ sa->runnable_avg_sum += runnable_contrib;
+ sa->runnable_avg_period += runnable_contrib;
+ }
+
+ /* Remainder of delta accrued against u_0` */
+ if (runnable)
+ sa->runnable_avg_sum += delta;
+ sa->runnable_avg_period += delta;
+
+ return decayed;
+}
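
Restating the roll-over arithmetic in closed form (notation mine): for an entity that stayed runnable, a single update that crosses p full periods, with d_w microseconds already accrued in the current period and r microseconds left over, performs

    \text{runnable\_avg\_sum} \leftarrow
      \big(\text{runnable\_avg\_sum} + (1024 - d_w)\big)\, y^{\,p+1}
      \;+\; \sum_{k=1}^{p} 1024\, y^{k} \;+\; r

with the same update applied unconditionally to runnable_avg_period.
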
+
+/* Synchronize an entity's decay with its parenting cfs_rq. */
+static inline u64 __synchronize_entity_decay(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ u64 decays = atomic64_read(&cfs_rq->decay_counter);
+
+ decays -= se->avg.decay_count;
+ if (!decays)
+ return 0;
+
+ se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
+ se->avg.decay_count = 0;
+
+ return decays;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+ int force_update)
+{
+ struct task_group *tg = cfs_rq->tg;
+ s64 tg_contrib;
+
+ tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
+ tg_contrib -= cfs_rq->tg_load_contrib;
+
+ if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
+ atomic64_add(tg_contrib, &tg->load_avg);
+ cfs_rq->tg_load_contrib += tg_contrib;
+ }
+}
+
+/*
+ * Aggregate cfs_rq runnable averages into an equivalent task_group
+ * representation for computing load contributions.
+ */
+static inline void __update_tg_runnable_avg(struct sched_avg *sa,
+ struct cfs_rq *cfs_rq)
+{
+ struct task_group *tg = cfs_rq->tg;
+ long contrib;
+
+ /* The fraction of a cpu used by this cfs_rq */
+ contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
+ sa->runnable_avg_period + 1);
+ contrib -= cfs_rq->tg_runnable_contrib;
+
+ if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
+ atomic_add(contrib, &tg->runnable_avg);
+ cfs_rq->tg_runnable_contrib += contrib;
+ }
+}
+
+static inline void __update_group_entity_contrib(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = group_cfs_rq(se);
+ struct task_group *tg = cfs_rq->tg;
+ int runnable_avg;
+
+ u64 contrib;
+
+ contrib = cfs_rq->tg_load_contrib * tg->shares;
+ se->avg.load_avg_contrib = div64_u64(contrib,
+ atomic64_read(&tg->load_avg) + 1);
+
+ /*
+ * For group entities we need to compute a correction term in the case
+ * that they are consuming <1 cpu so that we would contribute the same
+ * load as a task of equal weight.
+ *
+ * Explicitly co-ordinating this measurement would be expensive, but
+ * fortunately the sum of each cpu's contribution forms a usable
+ * lower-bound on the true value.
+ *
+ * Consider the aggregate of 2 contributions. Either they are disjoint
+ * (and the sum represents the true value) or they overlap and we are
+ * understating by the aggregate of their overlap.
+ *
+ * Extending this to N cpus, for a given overlap, the maximum amount we
+ * can understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
+ * cpus that overlap for this interval and w_i is the interval width.
+ *
+ * On a small machine the first term is well-bounded, which bounds the
+ * total error since w_i is a subset of the period. On a larger
+ * machine, while this first term can be larger, any w_i of
+ * consequential size is guaranteed to see n_i*w_i quickly converge to
+ * our upper bound of 1 cpu.
+ */
+ runnable_avg = atomic_read(&tg->runnable_avg);
+ if (runnable_avg < NICE_0_LOAD) {
+ se->avg.load_avg_contrib *= runnable_avg;
+ se->avg.load_avg_contrib >>= NICE_0_SHIFT;
+ }
+}
+#else
+static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
+ int force_update) {}
+static inline void __update_tg_runnable_avg(struct sched_avg *sa,
+ struct cfs_rq *cfs_rq) {}
+static inline void __update_group_entity_contrib(struct sched_entity *se) {}
+#endif
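
Putting the group-entity pieces above together, the net effect (restating the code, with the shift written as a ratio) is

    \text{load\_avg\_contrib} =
      \frac{\text{tg\_load\_contrib} \cdot \text{shares}}{\text{tg->load\_avg} + 1}
      \cdot \min\!\Big(1,\ \frac{\text{tg->runnable\_avg}}{\text{NICE\_0\_LOAD}}\Big)

so a group consuming half a cpu contributes roughly half of what its shares alone would imply.
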
+
+static inline void __update_task_entity_contrib(struct sched_entity *se)
+{
+ u32 contrib;
+
+ /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
+ contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
+ contrib /= (se->avg.runnable_avg_period + 1);
+ se->avg.load_avg_contrib = scale_load(contrib);
+}
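
For a concrete feel of the task case, here is a hedged user-space sketch with made-up but representative numbers (a nice-0 task of weight 1024 that has been runnable for about three quarters of its decayed history):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t runnable_avg_sum = 35806;	/* ~0.75 of a fully decayed history */
	uint32_t runnable_avg_period = 47742;	/* LOAD_AVG_MAX */
	uint32_t weight = 1024;			/* nice-0 weight */

	/* mirrors __update_task_entity_contrib(), scale factors elided */
	uint32_t contrib = runnable_avg_sum * weight / (runnable_avg_period + 1);

	printf("load_avg_contrib = %u\n", contrib);	/* ~768 */
	return 0;
}
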
+
+/* Compute the current contribution to load_avg by se, return any delta */
+static long __update_entity_load_avg_contrib(struct sched_entity *se)
+{
+ long old_contrib = se->avg.load_avg_contrib;
+
+ if (entity_is_task(se)) {
+ __update_task_entity_contrib(se);
+ } else {
+ __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
+ __update_group_entity_contrib(se);
+ }
+
+ return se->avg.load_avg_contrib - old_contrib;
+}
+
+static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
+ long load_contrib)
+{
+ if (likely(load_contrib < cfs_rq->blocked_load_avg))
+ cfs_rq->blocked_load_avg -= load_contrib;
+ else
+ cfs_rq->blocked_load_avg = 0;
+}
+
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+
+/* Update a sched_entity's runnable average */
+static inline void update_entity_load_avg(struct sched_entity *se,
+ int update_cfs_rq)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ long contrib_delta;
+ u64 now;
+
+ /*
+ * For a group entity we need to use cfs_rq_clock_task() of its owned
+ * cfs_rq, in case it is the parent of a throttled hierarchy.
+ */
+ if (entity_is_task(se))
+ now = cfs_rq_clock_task(cfs_rq);
+ else
+ now = cfs_rq_clock_task(group_cfs_rq(se));
+
+ if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
+ return;
+
+ contrib_delta = __update_entity_load_avg_contrib(se);
+
+ if (!update_cfs_rq)
+ return;
+
+ if (se->on_rq)
+ cfs_rq->runnable_load_avg += contrib_delta;
+ else
+ subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+}
+
+/*
+ * Decay the load contributed by all blocked children and account this so that
+ * their contribution may be appropriately discounted when they wake up.
+ */
+static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
+{
+ u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
+ u64 decays;
+
+ decays = now - cfs_rq->last_decay;
+ if (!decays && !force_update)
+ return;
+
+ if (atomic64_read(&cfs_rq->removed_load)) {
+ u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
+ subtract_blocked_load_contrib(cfs_rq, removed_load);
+ }
+
+ if (decays) {
+ cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
+ decays);
+ atomic64_add(decays, &cfs_rq->decay_counter);
+ cfs_rq->last_decay = now;
+ }
+
+ __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
+ update_cfs_shares(cfs_rq);
+}
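
Since now is taken in 2^20 ns (~1 ms) units, the blocked sum decays as

    \text{blocked\_load\_avg} \leftarrow \text{blocked\_load\_avg} \cdot y^{\,\Delta},
    \qquad \Delta = \big\lfloor \text{clock\_task} / 2^{20} \big\rfloor - \text{last\_decay}

so a cfs_rq left alone sheds half of its blocked load roughly every 32 ms.
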
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+ __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
+ __update_tg_runnable_avg(&rq->avg, &rq->cfs);
+}
+
+/* Add the load generated by se into cfs_rq's child load-average */
+static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
+ struct sched_entity *se,
+ int wakeup)
+{
+ /*
+ * We track migrations using entity decay_count <= 0; on a wake-up
+ * migration we use a negative decay count to track the remote decays
+ * accumulated while sleeping.
+ */
+ if (unlikely(se->avg.decay_count <= 0)) {
+ se->avg.last_runnable_update = rq_of(cfs_rq)->clock_task;
+ if (se->avg.decay_count) {
+ /*
+ * In a wake-up migration we have to approximate the
+ * time sleeping. This is because we can't synchronize
+ * clock_task between the two cpus, and it is not
+ * guaranteed to be read-safe. Instead, we can
+ * approximate this using our carried decays, which are
+ * explicitly atomically readable.
+ */
+ se->avg.last_runnable_update -= (-se->avg.decay_count)
+ << 20;
+ update_entity_load_avg(se, 0);
+ /* Indicate that we're now synchronized and on-rq */
+ se->avg.decay_count = 0;
+ }
+ wakeup = 0;
+ } else {
+ __synchronize_entity_decay(se);
+ }
+
+ /* migrated tasks did not contribute to our blocked load */
+ if (wakeup) {
+ subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
+ update_entity_load_avg(se, 0);
+ }
+
+ cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
+ /* we force update consideration on load-balancer moves */
+ update_cfs_rq_blocked_load(cfs_rq, !wakeup);
+}
+
+/*
+ * Remove se's load from this cfs_rq's child load-average; if the entity is
+ * transitioning to a blocked state we track its projected decay using
+ * blocked_load_avg.
+ */
+static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
+ struct sched_entity *se,
+ int sleep)
+{
+ update_entity_load_avg(se, 1);
+ /* we force update consideration on load-balancer moves */
+ update_cfs_rq_blocked_load(cfs_rq, !sleep);
+
+ cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
+ if (sleep) {
+ cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
+ se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
+ } /* migrations, e.g. sleep=0 leave decay_count == 0 */
+}
+#else
+static inline void update_entity_load_avg(struct sched_entity *se,
+ int update_cfs_rq) {}
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
+static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
+ struct sched_entity *se,
+ int wakeup) {}
+static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
+ struct sched_entity *se,
+ int sleep) {}
+static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
+ int force_update) {}
+#endif
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -1096,9 +1475,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
- update_cfs_load(cfs_rq, 0);
account_entity_enqueue(cfs_rq, se);
- update_cfs_shares(cfs_rq);
+ enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
@@ -1190,9 +1568,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
- se->on_rq = 0;
- update_cfs_load(cfs_rq, 0);
account_entity_dequeue(cfs_rq, se);
+ dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
/*
* Normalize the entity after updating the min_vruntime because the
@@ -1206,7 +1583,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
return_cfs_rq_runtime(cfs_rq);
update_min_vruntime(cfs_rq);
- update_cfs_shares(cfs_rq);
+ se->on_rq = 0;
}
/*
@@ -1340,6 +1717,8 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
update_stats_wait_start(cfs_rq, prev);
/* Put 'current' back into the tree. */
__enqueue_entity(cfs_rq, prev);
+ /* in !on_rq case, update occurred at dequeue */
+ update_entity_load_avg(prev, 1);
}
cfs_rq->curr = NULL;
}
@@ -1353,9 +1732,10 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
update_curr(cfs_rq);
/*
- * Update share accounting for long-running entities.
+ * Ensure that runnable average is periodically updated.
*/
- update_entity_shares_tick(cfs_rq);
+ update_entity_load_avg(curr, 1);
+ update_cfs_rq_blocked_load(cfs_rq, 1);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -1448,6 +1828,15 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
return &tg->cfs_bandwidth;
}
+/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
+{
+ if (unlikely(cfs_rq->throttle_count))
+ return cfs_rq->throttled_clock_task;
+
+ return rq_of(cfs_rq)->clock_task - cfs_rq->throttled_clock_task_time;
+}
+
/* returns 0 on failure to allocate runtime */
static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
@@ -1592,14 +1981,9 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
cfs_rq->throttle_count--;
#ifdef CONFIG_SMP
if (!cfs_rq->throttle_count) {
- u64 delta = rq->clock_task - cfs_rq->load_stamp;
-
- /* leaving throttled state, advance shares averaging windows */
- cfs_rq->load_stamp += delta;
- cfs_rq->load_last += delta;
-
- /* update entity weight now that we are on_rq again */
- update_cfs_shares(cfs_rq);
+ /* adjust cfs_rq_clock_task() */
+ cfs_rq->throttled_clock_task_time += rq->clock_task -
+ cfs_rq->throttled_clock_task;
}
#endif
@@ -1611,9 +1995,9 @@ static int tg_throttle_down(struct task_group *tg, void *data)
struct rq *rq = data;
struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
- /* group is entering throttled state, record last load */
+ /* group is entering throttled state, stop time */
if (!cfs_rq->throttle_count)
- update_cfs_load(cfs_rq, 0);
+ cfs_rq->throttled_clock_task = rq->clock_task;
cfs_rq->throttle_count++;
return 0;
@@ -1628,7 +2012,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
- /* account load preceding throttle */
+ /* freeze hierarchy runnable averages while throttled */
rcu_read_lock();
walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
rcu_read_unlock();
@@ -1652,7 +2036,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
rq->nr_running -= task_delta;
cfs_rq->throttled = 1;
- cfs_rq->throttled_timestamp = rq->clock;
+ cfs_rq->throttled_clock = rq->clock;
raw_spin_lock(&cfs_b->lock);
list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
raw_spin_unlock(&cfs_b->lock);
@@ -1670,10 +2054,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->throttled = 0;
raw_spin_lock(&cfs_b->lock);
- cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
+ cfs_b->throttled_time += rq->clock - cfs_rq->throttled_clock;
list_del_rcu(&cfs_rq->throttled_list);
raw_spin_unlock(&cfs_b->lock);
- cfs_rq->throttled_timestamp = 0;
update_rq_clock(rq);
/* update hierarchical throttle state */
@@ -2073,8 +2456,13 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq)
}
#else /* CONFIG_CFS_BANDWIDTH */
-static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {}
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
+{
+ return rq_of(cfs_rq)->clock_task;
+}
+
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+ unsigned long delta_exec) {}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
@@ -2207,12 +2595,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq);
+ update_entity_load_avg(se, 1);
+ update_cfs_rq_blocked_load(cfs_rq, 0);
}
- if (!se)
+ if (!se) {
+ update_rq_runnable_avg(rq, rq->nr_running);
inc_nr_running(rq);
+ }
hrtick_update(rq);
}
@@ -2266,12 +2656,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_cfs_load(cfs_rq, 0);
- update_cfs_shares(cfs_rq);
+ update_entity_load_avg(se, 1);
+ update_cfs_rq_blocked_load(cfs_rq, 0);
}
- if (!se)
+ if (!se) {
dec_nr_running(rq);
+ update_rq_runnable_avg(rq, 1);
+ }
hrtick_update(rq);
}
@@ -2781,6 +3173,37 @@ unlock:
return new_cpu;
}
+
+/*
+ * Load-tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
+ * may be removed once load-tracking is useful for applications beyond shares
+ * distribution (e.g. load-balance).
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
+ * cfs_rq_of(p) references at time of call are still valid and identify the
+ * previous cpu. However, the caller only guarantees p->pi_lock is held; no
+ * other assumptions, including the state of rq->lock, should be made.
+ */
+static void
+migrate_task_rq_fair(struct task_struct *p, int next_cpu)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ /*
+ * Load tracking: accumulate removed load so that it can be processed
+ * when we next update owning cfs_rq under rq->lock. Tasks contribute
+ * to blocked load iff they have a positive decay-count. It can never
+ * be negative here since on-rq tasks have decay-count == 0.
+ */
+ if (se->avg.decay_count) {
+ se->avg.decay_count = -__synchronize_entity_decay(se);
+ atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
+ }
+}
+#endif
#endif /* CONFIG_SMP */
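
To illustrate the removed_load handshake in isolation, here is a hedged user-space sketch that substitutes C11 atomics for the kernel's atomic64_t; the names and numbers are illustrative, not kernel API.

#include <stdio.h>
#include <stdint.h>
#include <stdatomic.h>

static _Atomic int64_t removed_load;
static int64_t blocked_load_avg = 4096;

/* migrating side: runs with only p->pi_lock, so it does not touch the
 * old rq's fields directly; it just parks its contribution here */
static void migrate_side(int64_t load_avg_contrib)
{
	atomic_fetch_add(&removed_load, load_avg_contrib);
}

/* owning cpu: drains the parked load the next time it updates the
 * cfs_rq under rq->lock, mirroring update_cfs_rq_blocked_load() */
static void owning_cpu_update(void)
{
	int64_t removed = atomic_exchange(&removed_load, 0);

	blocked_load_avg -= removed;
	if (blocked_load_avg < 0)
		blocked_load_avg = 0;
	printf("blocked_load_avg = %lld\n", (long long)blocked_load_avg);
}

int main(void)
{
	migrate_side(1024);	/* a blocked task migrates away */
	owning_cpu_update();	/* prints 3072 */
	return 0;
}
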
static unsigned long
@@ -2907,7 +3330,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
* Batch and idle tasks do not preempt non-idle tasks (their preemption
* is driven by the tick):
*/
- if (unlikely(p->policy != SCHED_NORMAL))
+ if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
return;
find_matching_se(&se, &pse);
@@ -3033,8 +3456,122 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp
#ifdef CONFIG_SMP
/**************************************************
- * Fair scheduling class load-balancing methods:
- */
+ * Fair scheduling class load-balancing methods.
+ *
+ * BASICS
+ *
+ * The purpose of load-balancing is to achieve the same basic fairness the
+ * per-cpu scheduler provides, namely provide a proportional amount of compute
+ * time to each task. This is expressed in the following equation:
+ *
+ * W_i,n/P_i == W_j,n/P_j for all i,j (1)
+ *
+ * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
+ * W_i,0 is defined as:
+ *
+ * W_i,0 = \Sum_j w_i,j (2)
+ *
+ * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
+ * is derived from the nice value as per prio_to_weight[].
+ *
+ * The weight average is an exponential decay average of the instantaneous
+ * weight:
+ *
+ * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
+ *
+ * P_i is the cpu power (or compute capacity) of cpu i; typically it is the
+ * fraction of 'recent' time available for SCHED_OTHER task execution. But it
+ * can also include other factors [XXX].
+ *
+ * To achieve this balance we define a measure of imbalance which follows
+ * directly from (1):
+ *
+ * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
+ *
+ * We then move tasks around to minimize the imbalance. In the continuous
+ * function space it is obvious this converges, in the discrete case we get
+ * a few fun cases generally called infeasible weight scenarios.
+ *
+ * [XXX expand on:
+ * - infeasible weights;
+ * - local vs global optima in the discrete case. ]
+ *
+ *
+ * SCHED DOMAINS
+ *
+ * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
+ * for all i,j solution, we create a tree of cpus that follows the hardware
+ * topology where each level pairs two lower groups (or better). This results
+ * in O(log n) layers. Furthermore we reduce the number of cpus going up the
+ * tree to only the first of the previous level and we decrease the frequency
+ * of load-balance at each level inversely proportional to the number of cpus in
+ * the groups.
+ *
+ * This yields:
+ *
+ *   \Sum_{i = 0}^{log_2 n} { (1/2^i) * (n/2^i) * 2^i } = O(n)         (5)
+ *
+ * where, at level i, 1/2^i is the load-balance frequency, n/2^i is the
+ * number of cpus doing load-balance, and 2^i is the size of each group;
+ * the sum runs over all levels of the tree.
+ *
+ * Coupled with a limit on how many tasks we can migrate every balance pass,
+ * this makes (5) the runtime complexity of the balancer.
+ *
+ * An important property here is that each CPU is still (indirectly) connected
+ * to every other cpu in at most O(log n) steps:
+ *
+ * The adjacency matrix of the resulting graph is given by:
+ *
+ *   A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)   (6)
+ *
+ * And you'll find that:
+ *
+ * A^(log_2 n)_i,j != 0 for all i,j (7)
+ *
+ * Showing there's indeed a path between every cpu in at most O(log n) steps.
+ * The task movement gives a factor of O(m), giving a convergence complexity
+ * of:
+ *
+ * O(nm log n), n := nr_cpus, m := nr_tasks (8)
+ *
+ *
+ * WORK CONSERVING
+ *
+ * In order to avoid CPUs going idle while there's still work to do, new idle
+ * balancing is more aggressive and has the newly idle cpu iterate up the domain
+ * tree itself instead of relying on other CPUs to bring it work.
+ *
+ * This adds some complexity to both (5) and (8) but it reduces the total idle
+ * time.
+ *
+ * [XXX more?]
+ *
+ *
+ * CGROUPS
+ *
+ * Cgroups make a horror show out of (2), instead of a simple sum we get:
+ *
+ *   W_i,0 = \Sum_j \Prod_k w_k * (s_k,i / S_k)                        (9)
+ *
+ * Where
+ *
+ * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
+ *
+ * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
+ *
+ * The big problem is S_k, it's a global sum needed to compute a local (W_i)
+ * property.
+ *
+ * [XXX write more on how we solve this.. _after_ merging pjt's patches that
+ * rewrite all of this once again.]
+ */
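
Filling in the arithmetic behind (5): at level i each group holds 2^i cpus, only n/2^i cpus run the balance there, and they do so at frequency 1/2^i, so the total balancing work per unit time is

    \sum_{i=0}^{\log_2 n} \frac{1}{2^{i}} \cdot \frac{n}{2^{i}} \cdot 2^{i}
      \;=\; \sum_{i=0}^{\log_2 n} \frac{n}{2^{i}} \;\le\; 2n \;=\; O(n)

Combined with the O(log n) path length from (7) and at most m task movements, this is what yields the O(nm log n) convergence bound in (8).
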
static unsigned long __read_mostly max_load_balance_interval = HZ/10;
@@ -3300,52 +3837,58 @@ next:
/*
* update tg->load_weight by folding this cpu's load_avg
*/
-static int update_shares_cpu(struct task_group *tg, int cpu)
+static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
{
- struct cfs_rq *cfs_rq;
- unsigned long flags;
- struct rq *rq;
-
- if (!tg->se[cpu])
- return 0;
-
- rq = cpu_rq(cpu);
- cfs_rq = tg->cfs_rq[cpu];
-
- raw_spin_lock_irqsave(&rq->lock, flags);
+ struct sched_entity *se = tg->se[cpu];
+ struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
- update_rq_clock(rq);
- update_cfs_load(cfs_rq, 1);
+ /* throttled entities do not contribute to load */
+ if (throttled_hierarchy(cfs_rq))
+ return;
- /*
- * We need to update shares after updating tg->load_weight in
- * order to adjust the weight of groups with long running tasks.
- */
- update_cfs_shares(cfs_rq);
+ update_cfs_rq_blocked_load(cfs_rq, 1);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- return 0;
+ if (se) {
+ update_entity_load_avg(se, 1);
+ /*
+ * We pivot on our runnable average having decayed to zero for
+ * list removal. This generally implies that all our children
+ * have also been removed (modulo rounding error or bandwidth
+ * control); however, such cases are rare and we can fix these
+ * at enqueue.
+ *
+ * TODO: fix up out-of-order children on enqueue.
+ */
+ if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
+ list_del_leaf_cfs_rq(cfs_rq);
+ } else {
+ struct rq *rq = rq_of(cfs_rq);
+ update_rq_runnable_avg(rq, rq->nr_running);
+ }
}
-static void update_shares(int cpu)
+static void update_blocked_averages(int cpu)
{
- struct cfs_rq *cfs_rq;
struct rq *rq = cpu_rq(cpu);
+ struct cfs_rq *cfs_rq;
+ unsigned long flags;
- rcu_read_lock();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ update_rq_clock(rq);
/*
* Iterates the task_group tree in a bottom up fashion, see
* list_add_leaf_cfs_rq() for details.
*/
for_each_leaf_cfs_rq(rq, cfs_rq) {
- /* throttled entities do not contribute to load */
- if (throttled_hierarchy(cfs_rq))
- continue;
-
- update_shares_cpu(cfs_rq->tg, cpu);
+ /*
+ * Note: We may want to consider periodically releasing
+ * rq->lock around these updates so that creating many task
+ * groups does not result in continually extending hold time.
+ */
+ __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
}
- rcu_read_unlock();
+
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*
@@ -3397,7 +3940,7 @@ static unsigned long task_h_load(struct task_struct *p)
return load;
}
#else
-static inline void update_shares(int cpu)
+static inline void update_blocked_averages(int cpu)
{
}
@@ -4457,12 +5000,14 @@ void idle_balance(int this_cpu, struct rq *this_rq)
if (this_rq->avg_idle < sysctl_sched_migration_cost)
return;
+ update_rq_runnable_avg(this_rq, 1);
+
/*
* Drop the rq->lock, but keep IRQ/preempt disabled.
*/
raw_spin_unlock(&this_rq->lock);
- update_shares(this_cpu);
+ update_blocked_averages(this_cpu);
rcu_read_lock();
for_each_domain(this_cpu, sd) {
unsigned long interval;
@@ -4717,7 +5262,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
int update_next_balance = 0;
int need_serialize;
- update_shares(cpu);
+ update_blocked_averages(cpu);
rcu_read_lock();
for_each_domain(cpu, sd) {
@@ -4954,6 +5499,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
cfs_rq = cfs_rq_of(se);
entity_tick(cfs_rq, se, queued);
}
+
+ update_rq_runnable_avg(rq, 1);
}
/*
@@ -5046,6 +5593,20 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
place_entity(cfs_rq, se, 0);
se->vruntime -= cfs_rq->min_vruntime;
}
+
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+ /*
+ * Remove our load from contribution when we leave sched_fair
+ * and ensure we don't carry in an old decay_count if we
+ * switch back.
+ */
+ if (p->se.avg.decay_count) {
+ struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
+ __synchronize_entity_decay(&p->se);
+ subtract_blocked_load_contrib(cfs_rq,
+ p->se.avg.load_avg_contrib);
+ }
+#endif
}
/*
@@ -5092,11 +5653,16 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
#ifndef CONFIG_64BIT
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
+#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
+ atomic64_set(&cfs_rq->decay_counter, 1);
+ atomic64_set(&cfs_rq->removed_load, 0);
+#endif
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
+ struct cfs_rq *cfs_rq;
/*
* If the task was not on the rq at the time of this cgroup movement
* it must have been asleep, sleeping tasks keep their ->vruntime
@@ -5128,8 +5694,19 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
if (!on_rq)
p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
set_task_rq(p, task_cpu(p));
- if (!on_rq)
- p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
+ if (!on_rq) {
+ cfs_rq = cfs_rq_of(&p->se);
+ p->se.vruntime += cfs_rq->min_vruntime;
+#ifdef CONFIG_SMP
+ /*
+ * migrate_task_rq_fair() will have removed our previous
+ * contribution, but we must synchronize for ongoing future
+ * decay.
+ */
+ p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
+ cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
+#endif
+ }
}
void free_fair_sched_group(struct task_group *tg)
@@ -5214,10 +5791,6 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
cfs_rq->tg = tg;
cfs_rq->rq = rq;
-#ifdef CONFIG_SMP
- /* allow initial update_cfs_load() to truncate */
- cfs_rq->load_stamp = 1;
-#endif
init_cfs_rq_runtime(cfs_rq);
tg->cfs_rq[cpu] = cfs_rq;
@@ -5264,8 +5837,11 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
se = tg->se[i];
/* Propagate contribution to hierarchy */
raw_spin_lock_irqsave(&rq->lock, flags);
- for_each_sched_entity(se)
+ for_each_sched_entity(se) {
update_cfs_shares(group_cfs_rq(se));
+ /* update contribution to parent */
+ update_entity_load_avg(se, 1);
+ }
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -5319,7 +5895,9 @@ const struct sched_class fair_sched_class = {
#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_fair,
-
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ .migrate_task_rq = migrate_task_rq_fair,
+#endif
.rq_online = rq_online_fair,
.rq_offline = rq_offline_fair,
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index eebefcad7027..e68e69ab917d 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -32,6 +32,11 @@ SCHED_FEAT(LAST_BUDDY, true)
SCHED_FEAT(CACHE_HOT_BUDDY, true)
/*
+ * Allow wakeup-time preemption of the current task:
+ */
+SCHED_FEAT(WAKEUP_PREEMPTION, true)
+
+/*
* Use arch dependent cpu power functions
*/
SCHED_FEAT(ARCH_POWER, true)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7a7db09cfabc..5eca173b563f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -112,6 +112,8 @@ struct task_group {
unsigned long shares;
atomic_t load_weight;
+ atomic64_t load_avg;
+ atomic_t runnable_avg;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
@@ -222,22 +224,29 @@ struct cfs_rq {
unsigned int nr_spread_over;
#endif
+#ifdef CONFIG_SMP
+/*
+ * Load-tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
+ * may be removed once load-tracking is useful for applications beyond shares
+ * distribution (e.g. load-balance).
+ */
#ifdef CONFIG_FAIR_GROUP_SCHED
- struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
-
/*
- * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
- * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
- * (like users, containers etc.)
- *
- * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
- * list is used during load balance.
+ * CFS Load tracking
+ * Under CFS, load is tracked on a per-entity basis and aggregated up.
+ * This allows for the description of both thread and group usage (in
+ * the FAIR_GROUP_SCHED case).
*/
- int on_list;
- struct list_head leaf_cfs_rq_list;
- struct task_group *tg; /* group that "owns" this runqueue */
+ u64 runnable_load_avg, blocked_load_avg;
+ atomic64_t decay_counter, removed_load;
+ u64 last_decay;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+/* These always depend on CONFIG_FAIR_GROUP_SCHED */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ u32 tg_runnable_contrib;
+ u64 tg_load_contrib;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
-#ifdef CONFIG_SMP
/*
* h_load = weight * f(tg)
*
@@ -245,26 +254,30 @@ struct cfs_rq {
* this group.
*/
unsigned long h_load;
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
/*
- * Maintaining per-cpu shares distribution for group scheduling
+ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+ * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
+ * (like users, containers etc.)
*
- * load_stamp is the last time we updated the load average
- * load_last is the last time we updated the load average and saw load
- * load_unacc_exec_time is currently unaccounted execution time
+ * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
+ * list is used during load balance.
*/
- u64 load_avg;
- u64 load_period;
- u64 load_stamp, load_last, load_unacc_exec_time;
+ int on_list;
+ struct list_head leaf_cfs_rq_list;
+ struct task_group *tg; /* group that "owns" this runqueue */
- unsigned long load_contribution;
-#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
int runtime_enabled;
u64 runtime_expires;
s64 runtime_remaining;
- u64 throttled_timestamp;
+ u64 throttled_clock, throttled_clock_task;
+ u64 throttled_clock_task_time;
int throttled, throttle_count;
struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
@@ -467,6 +480,8 @@ struct rq {
#ifdef CONFIG_SMP
struct llist_head wake_list;
#endif
+
+ struct sched_avg avg;
};
static inline int cpu_of(struct rq *rq)
@@ -1212,4 +1227,3 @@ static inline u64 irq_time_read(int cpu)
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-
diff --git a/kernel/softirq.c b/kernel/softirq.c
index cc96bdc0c2c9..ed567babe789 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void)
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
- vtime_account(current);
+ vtime_account_irq_enter(current);
__local_bh_disable((unsigned long)__builtin_return_address(0),
SOFTIRQ_OFFSET);
@@ -272,7 +272,7 @@ restart:
lockdep_softirq_exit();
- vtime_account(current);
+ vtime_account_irq_exit(current);
__local_bh_enable(SOFTIRQ_OFFSET);
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
@@ -341,7 +341,7 @@ static inline void invoke_softirq(void)
*/
void irq_exit(void)
{
- vtime_account(current);
+ vtime_account_irq_exit(current);
trace_hardirq_exit();
sub_preempt_count(IRQ_EXIT_OFFSET);
if (!in_interrupt() && local_softirq_pending())
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 97c465ebd844..2b859828cdc3 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -16,8 +16,10 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2006
+ * Copyright (C) Fujitsu, 2012
*
* Author: Paul McKenney <paulmck@us.ibm.com>
+ * Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU/ *.txt
@@ -34,6 +36,10 @@
#include <linux/delay.h>
#include <linux/srcu.h>
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
+
/*
* Initialize an rcu_batch structure to empty.
*/
@@ -92,9 +98,6 @@ static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
}
}
-/* single-thread state-machine */
-static void process_srcu(struct work_struct *work);
-
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
sp->completed = 0;
@@ -464,7 +467,9 @@ static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
*/
void synchronize_srcu(struct srcu_struct *sp)
{
- __synchronize_srcu(sp, SYNCHRONIZE_SRCU_TRYCOUNT);
+ __synchronize_srcu(sp, rcu_expedited
+ ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
+ : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
@@ -637,7 +642,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
/*
* This is the work-queue function that handles SRCU grace periods.
*/
-static void process_srcu(struct work_struct *work)
+void process_srcu(struct work_struct *work)
{
struct srcu_struct *sp;
@@ -648,3 +653,4 @@ static void process_srcu(struct work_struct *work)
srcu_invoke_callbacks(sp);
srcu_reschedule(sp);
}
+EXPORT_SYMBOL_GPL(process_srcu);
diff --git a/kernel/sys.c b/kernel/sys.c
index e6e0ece5f6a0..265b37690421 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1046,7 +1046,7 @@ void do_sys_times(struct tms *tms)
cputime_t tgutime, tgstime, cutime, cstime;
spin_lock_irq(&current->sighand->siglock);
- thread_group_times(current, &tgutime, &tgstime);
+ thread_group_cputime_adjusted(current, &tgutime, &tgstime);
cutime = current->signal->cutime;
cstime = current->signal->cstime;
spin_unlock_irq(&current->sighand->siglock);
@@ -1704,7 +1704,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
utime = stime = 0;
if (who == RUSAGE_THREAD) {
- task_times(current, &utime, &stime);
+ task_cputime_adjusted(current, &utime, &stime);
accumulate_thread_rusage(p, r);
maxrss = p->signal->maxrss;
goto out;
@@ -1730,7 +1730,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
break;
case RUSAGE_SELF:
- thread_group_times(p, &tgutime, &tgstime);
+ thread_group_cputime_adjusted(p, &tgutime, &tgstime);
utime += tgutime;
stime += tgstime;
r->ru_nvcsw += p->signal->nvcsw;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 26f65eaa01f9..33f71f37267e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -565,7 +565,7 @@ static struct ctl_table kern_table[] = {
.extra2 = &one,
},
#endif
-#ifdef CONFIG_HOTPLUG
+
{
.procname = "hotplug",
.data = &uevent_helper,
@@ -573,7 +573,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dostring,
},
-#endif
+
#ifdef CONFIG_CHR_DEV_SG
{
.procname = "sg-big-buff",
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 6629bf7b5285..7a925ba456fb 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -58,7 +58,7 @@ static cycle_t jiffies_read(struct clocksource *cs)
return (cycle_t) jiffies;
}
-struct clocksource clocksource_jiffies = {
+static struct clocksource clocksource_jiffies = {
.name = "jiffies",
.rating = 1, /* lowest valid rating*/
.read = jiffies_read,
@@ -67,6 +67,8 @@ struct clocksource clocksource_jiffies = {
.shift = JIFFIES_SHIFT,
};
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
@@ -74,9 +76,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ecad4e4..b1600a6973f4 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b901fed..cf3e59ed6dc0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,3 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a40260885265..d58e552d9fd1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
/*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time when the last jiffy update happened. Protected by jiffies_lock.
*/
static ktime_t last_jiffies_update;
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
ktime_t delta;
/*
- * Do a quick check without holding xtime_lock:
+ * Do a quick check without holding jiffies_lock:
*/
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 < tick_period.tv64)
return;
- /* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ /* Reevaluate with jiffies_lock held */
+ write_seqlock(&jiffies_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
/*
@@ -89,15 +89,58 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
return period;
}
+
+static void tick_sched_do_timer(ktime_t now)
+{
+ int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+ /*
+ * Check if the do_timer duty was dropped. We don't care about
+ * concurrency: This happens only when the cpu in charge went
+ * into a long sleep. If two cpus happen to assign themself to
+ * this duty, then the jiffies update is still serialized by
+ * jiffies_lock.
+ */
+ if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+ tick_do_timer_cpu = cpu;
+#endif
+
+ /* Check if the jiffies need an update */
+ if (tick_do_timer_cpu == cpu)
+ tick_do_update_jiffies64(now);
+}
+
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+#ifdef CONFIG_NO_HZ
+ /*
+ * When we are idle and the tick is stopped, we have to touch
+ * the watchdog as we might not schedule for a really long
+ * time. This happens on completely idle SMP systems while
+ * waiting on the login prompt. We also increment the "start of
+ * idle" jiffy stamp so the idle accounting adjustment we do
+ * when we go busy again does not account for too many ticks.
+ */
+ if (ts->tick_stopped) {
+ touch_softlockup_watchdog();
+ if (is_idle_task(current))
+ ts->idle_jiffies++;
+ }
+#endif
+ update_process_times(user_mode(regs));
+ profile_tick(CPU_PROFILING);
+}
+
/*
* NOHZ - aka dynamic tick functionality
*/
@@ -282,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
@@ -526,6 +569,8 @@ void tick_nohz_irq_exit(void)
if (!ts->inidle)
return;
+ /* Cancel the timer because the CPU has already woken up from the C-states */
+ menu_hrtimer_cancel();
__tick_nohz_idle_enter(ts);
}
@@ -621,6 +666,8 @@ void tick_nohz_idle_exit(void)
ts->inidle = 0;
+ /* Cancel the timer because the CPU has already woken up from the C-states */
+ menu_hrtimer_cancel();
if (ts->idle_active || ts->tick_stopped)
now = ktime_get();
@@ -648,40 +695,12 @@ static void tick_nohz_handler(struct clock_event_device *dev)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
struct pt_regs *regs = get_irq_regs();
- int cpu = smp_processor_id();
ktime_t now = ktime_get();
dev->next_event.tv64 = KTIME_MAX;
- /*
- * Check if the do_timer duty was dropped. We don't care about
- * concurrency: This happens only when the cpu in charge went
- * into a long sleep. If two cpus happen to assign themself to
- * this duty, then the jiffies update is still serialized by
- * xtime_lock.
- */
- if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
- tick_do_timer_cpu = cpu;
-
- /* Check, if the jiffies need an update */
- if (tick_do_timer_cpu == cpu)
- tick_do_update_jiffies64(now);
-
- /*
- * When we are idle and the tick is stopped, we have to touch
- * the watchdog as we might not schedule for a really long
- * time. This happens on complete idle SMP systems while
- * waiting on the login prompt. We also increment the "start
- * of idle" jiffy stamp so the idle accounting adjustment we
- * do when we go busy again does not account too much ticks.
- */
- if (ts->tick_stopped) {
- touch_softlockup_watchdog();
- ts->idle_jiffies++;
- }
-
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING);
+ tick_sched_do_timer(now);
+ tick_sched_handle(ts, regs);
while (tick_nohz_reprogram(ts, now)) {
now = ktime_get();
@@ -794,7 +813,7 @@ void tick_check_idle(int cpu)
#ifdef CONFIG_HIGH_RES_TIMERS
/*
* We rearm the timer until we get disabled by the idle code.
- * Called with interrupts disabled and timer->base->cpu_base->lock held.
+ * Called with interrupts disabled.
*/
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
@@ -802,45 +821,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
container_of(timer, struct tick_sched, sched_timer);
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
- int cpu = smp_processor_id();
-#ifdef CONFIG_NO_HZ
- /*
- * Check if the do_timer duty was dropped. We don't care about
- * concurrency: This happens only when the cpu in charge went
- * into a long sleep. If two cpus happen to assign themself to
- * this duty, then the jiffies update is still serialized by
- * xtime_lock.
- */
- if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
- tick_do_timer_cpu = cpu;
-#endif
-
- /* Check, if the jiffies need an update */
- if (tick_do_timer_cpu == cpu)
- tick_do_update_jiffies64(now);
+ tick_sched_do_timer(now);
/*
* Do not call, when we are not in irq context and have
* no valid regs pointer
*/
- if (regs) {
- /*
- * When we are idle and the tick is stopped, we have to touch
- * the watchdog as we might not schedule for a really long
- * time. This happens on complete idle SMP systems while
- * waiting on the login prompt. We also increment the "start of
- * idle" jiffy stamp so the idle accounting adjustment we do
- * when we go busy again does not account too much ticks.
- */
- if (ts->tick_stopped) {
- touch_softlockup_watchdog();
- if (is_idle_task(current))
- ts->idle_jiffies++;
- }
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING);
- }
+ if (regs)
+ tick_sched_handle(ts, regs);
hrtimer_forward(timer, now, tick_period);
@@ -874,7 +863,7 @@ void tick_setup_sched_timer(void)
/* Get the next period (per cpu) */
hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
- /* Offset the tick to avert xtime_lock contention. */
+ /* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
u64 offset = ktime_to_ns(tick_period) >> 1;
do_div(offset, num_possible_cpus());
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e424970bb562..4c7de02eacdc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -25,12 +25,6 @@
static struct timekeeper timekeeper;
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
@@ -1299,9 +1293,7 @@ struct timespec get_monotonic_coarse(void)
}
/*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
+ * Must hold jiffies_lock
*/
void do_timer(unsigned long ticks)
{
@@ -1389,7 +1381,7 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4cea4f41c1d9..5d89335a485f 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -119,6 +119,7 @@ config TRACING
select BINARY_PRINTF
select EVENT_TRACING
select TRACE_CLOCK
+ select IRQ_WORK
config GENERIC_TRACER
bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9dcf15d38380..7693aaf324c6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2437,7 +2437,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
{
iter->pos = 0;
iter->func_pos = 0;
- iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
+ iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}
static void *t_start(struct seq_file *m, loff_t *pos)
@@ -2868,7 +2868,7 @@ static int __init ftrace_mod_cmd_init(void)
{
return register_ftrace_command(&ftrace_mod_cmd);
}
-device_initcall(ftrace_mod_cmd_init);
+core_initcall(ftrace_mod_cmd_init);
static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
@@ -4055,7 +4055,7 @@ static int __init ftrace_nodyn_init(void)
ftrace_enabled = 1;
return 0;
}
-device_initcall(ftrace_nodyn_init);
+core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
@@ -4381,7 +4381,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
if (strlen(tmp) == 0)
return 1;
- ret = strict_strtol(tmp, 10, &val);
+ ret = kstrtol(tmp, 10, &val);
if (ret < 0)
return ret;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b979426d16c6..ce8514feedcd 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
unsigned long lost_events;
unsigned long last_overrun;
local_t entries_bytes;
- local_t commit_overrun;
- local_t overrun;
local_t entries;
+ local_t overrun;
+ local_t commit_overrun;
+ local_t dropped_events;
local_t committing;
local_t commits;
unsigned long read;
@@ -1396,6 +1397,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
struct list_head *head_page_with_bit;
head_page = &rb_set_head_page(cpu_buffer)->list;
+ if (!head_page)
+ break;
prev_page = head_page->prev;
first_page = pages->next;
@@ -1820,7 +1823,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
}
/**
- * ring_buffer_update_event - update event type and data
+ * rb_update_event - update event type and data
* @event: the even to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
@@ -2155,8 +2158,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
* If we are not in overwrite mode,
* this is easy, just stop here.
*/
- if (!(buffer->flags & RB_FL_OVERWRITE))
+ if (!(buffer->flags & RB_FL_OVERWRITE)) {
+ local_inc(&cpu_buffer->dropped_events);
goto out_reset;
+ }
ret = rb_handle_head_page(cpu_buffer,
tail_page,
@@ -2720,8 +2725,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
* and not the length of the event which would hold the header.
*/
int ring_buffer_write(struct ring_buffer *buffer,
- unsigned long length,
- void *data)
+ unsigned long length,
+ void *data)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
@@ -2929,12 +2934,12 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
-unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
{
unsigned long flags;
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
- unsigned long ret;
+ u64 ret = 0;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return 0;
@@ -2949,7 +2954,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
bpage = cpu_buffer->reader_page;
else
bpage = rb_set_head_page(cpu_buffer);
- ret = bpage->page->time_stamp;
+ if (bpage)
+ ret = bpage->page->time_stamp;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
return ret;
@@ -2995,7 +3001,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
/**
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
@@ -3015,7 +3022,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
+ * commits failing due to the buffer wrapping around while there are uncommitted
+ * events, such as during an interrupt storm.
* @buffer: The ring buffer
* @cpu: The per CPU buffer to get the number of overruns from
*/
@@ -3036,6 +3045,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
/**
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to get the number of overruns from
+ */
+unsigned long
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+
+ cpu_buffer = buffer->buffers[cpu];
+ ret = local_read(&cpu_buffer->dropped_events);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
+
+/**
* ring_buffer_entries - get the number of entries in a buffer
* @buffer: The ring buffer
*
@@ -3260,6 +3291,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* Splice the empty reader page into the list around the head.
*/
reader = rb_set_head_page(cpu_buffer);
+ if (!reader)
+ goto out;
cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
cpu_buffer->reader_page->list.prev = reader->list.prev;
@@ -3778,12 +3811,17 @@ void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ unsigned long flags;
/*
* Ring buffer is disabled from recording, here's a good place
- * to check the integrity of the ring buffer.
+ * to check the integrity of the ring buffer.
+ * Must prevent readers from trying to read, as the check
+ * clears the HEAD page and readers require it.
*/
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_check_pages(cpu_buffer);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
atomic_dec(&cpu_buffer->record_disabled);
atomic_dec(&cpu_buffer->buffer->resize_disabled);
@@ -3864,9 +3902,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
- local_set(&cpu_buffer->commit_overrun, 0);
local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
+ local_set(&cpu_buffer->commit_overrun, 0);
+ local_set(&cpu_buffer->dropped_events, 0);
local_set(&cpu_buffer->entries, 0);
local_set(&cpu_buffer->committing, 0);
local_set(&cpu_buffer->commits, 0);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 31e4f55773f1..b69cc380322d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -19,6 +19,7 @@
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
+#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
@@ -78,6 +79,21 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
}
/*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
+ * When a reader is waiting for data, this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
+/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
@@ -139,6 +155,18 @@ static int __init set_ftrace_dump_on_oops(char *str)
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
+
+static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_options __initdata;
+
+static int __init set_trace_boot_options(char *str)
+{
+ strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+ trace_boot_options = trace_boot_options_buf;
+ return 0;
+}
+__setup("trace_options=", set_trace_boot_options);
+
unsigned long long ns2usecs(cycle_t nsec)
{
nsec += 500;
@@ -198,20 +226,9 @@ static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
-/* tracer_enabled is used to toggle activation of a tracer */
-static int tracer_enabled = 1;
-
-/**
- * tracing_is_enabled - return tracer_enabled status
- *
- * This function is used by other tracers to know the status
- * of the tracer_enabled flag. Tracers may use this function
- * to know if it should enable their features when starting
- * up. See irqsoff tracer for an example (start_irqsoff_tracer).
- */
int tracing_is_enabled(void)
{
- return tracer_enabled;
+ return tracing_is_on();
}
/*
@@ -333,12 +350,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Runs as an irq_work callback to wake up any task that is blocked on
+ * the trace_wait queue. This is used with trace_poll for tasks polling
+ * the trace.
+ */
+static void trace_wake_up(struct irq_work *work)
{
- wake_up(&trace_wait);
-}
+ wake_up_all(&trace_wait);
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
/**
* tracing_on - enable tracing buffers
@@ -393,22 +416,6 @@ int tracing_is_on(void)
}
EXPORT_SYMBOL_GPL(tracing_is_on);
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
- const unsigned long delay = msecs_to_jiffies(2);
-
- if (trace_flags & TRACE_ITER_BLOCK)
- return;
- schedule_delayed_work(&wakeup_work, delay);
-}
-
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
@@ -431,7 +438,7 @@ static int __init set_tracing_thresh(char *str)
if (!str)
return 0;
- ret = strict_strtoul(str, 0, &threshold);
+ ret = kstrtoul(str, 0, &threshold);
if (ret < 0)
return 0;
tracing_thresh = threshold * 1000;
@@ -477,10 +484,12 @@ static const char *trace_options[] = {
static struct {
u64 (*func)(void);
const char *name;
+ int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
- { trace_clock_local, "local" },
- { trace_clock_global, "global" },
- { trace_clock_counter, "counter" },
+ { trace_clock_local, "local", 1 },
+ { trace_clock_global, "global", 1 },
+ { trace_clock_counter, "counter", 0 },
+ ARCH_TRACE_CLOCKS
};
int trace_clock_id;
@@ -757,6 +766,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+ /*
+ * The events can happen in critical sections where
+ * checking a work queue can cause deadlocks.
+ * After adding a task to the queue, this flag is set
+ * only to notify events to try to wake up the queue
+ * using irq_work.
+ *
+ * We don't clear it even if the buffer is no longer
+ * empty. The flag only causes the next event to run
+ * irq_work to do the work queue wake up. The worst
+ * that can happen if we race with !trace_empty() is that
+ * an event will cause an irq_work to try to wake up
+ * an empty queue.
+ *
+ * There's no reason to protect this flag either, as
+ * the work queue and irq_work logic will do the necessary
+ * synchronization for the wake ups. The only thing
+ * that is necessary is that the wake up happens after
+ * a task has been queued. Spurious wake ups are OK.
+ */
+ trace_wakeup_needed = true;
+
+ if (trace_empty(iter))
+ schedule();
+
+ finish_wait(&trace_wait, &wait);
+}
+
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
@@ -875,32 +918,6 @@ int register_tracer(struct tracer *type)
return ret;
}
-void unregister_tracer(struct tracer *type)
-{
- struct tracer **t;
-
- mutex_lock(&trace_types_lock);
- for (t = &trace_types; *t; t = &(*t)->next) {
- if (*t == type)
- goto found;
- }
- pr_info("Tracer %s not registered\n", type->name);
- goto out;
-
- found:
- *t = (*t)->next;
-
- if (type == current_trace && tracer_enabled) {
- tracer_enabled = 0;
- tracing_stop();
- if (current_trace->stop)
- current_trace->stop(&global_trace);
- current_trace = &nop_trace;
- }
-out:
- mutex_unlock(&trace_types_lock);
-}
-
void tracing_reset(struct trace_array *tr, int cpu)
{
struct ring_buffer *buffer = tr->buffer;
@@ -1131,10 +1148,14 @@ void trace_find_cmdline(int pid, char comm[])
void tracing_record_cmdline(struct task_struct *tsk)
{
- if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
- !tracing_is_on())
+ if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
return;
+ if (!__this_cpu_read(trace_cmdline_save))
+ return;
+
+ __this_cpu_write(trace_cmdline_save, false);
+
trace_save_cmdline(tsk);
}
@@ -1178,27 +1199,36 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
return event;
}
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+ __this_cpu_write(trace_cmdline_save, true);
+ if (trace_wakeup_needed) {
+ trace_wakeup_needed = false;
+ /* irq_work_queue() supplies its own memory barriers */
+ irq_work_queue(&trace_work_wakeup);
+ }
+ ring_buffer_unlock_commit(buffer, event);
+}
+
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
- unsigned long flags, int pc,
- int wake)
+ unsigned long flags, int pc)
{
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
-
- if (wake)
- trace_wake_up();
}
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1215,29 +1245,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+ __trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ struct pt_regs *regs)
{
- __trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc,
- struct pt_regs *regs)
-{
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
@@ -1269,7 +1291,7 @@ trace_function(struct trace_array *tr,
entry->parent_ip = parent_ip;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
}
void
@@ -1362,7 +1384,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
entry->size = trace.nr_entries;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out:
/* Again, don't let gcc optimize things here */
@@ -1458,7 +1480,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
save_stack_trace_user(&trace);
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out_drop_count:
__this_cpu_dec(user_stack_count);
@@ -1559,10 +1581,10 @@ static int alloc_percpu_trace_buffer(void)
return -ENOMEM;
}
+static int buffers_allocated;
+
void trace_printk_init_buffers(void)
{
- static int buffers_allocated;
-
if (buffers_allocated)
return;
@@ -1571,7 +1593,38 @@ void trace_printk_init_buffers(void)
pr_info("ftrace: Allocated trace_printk buffers\n");
+ /* Expand the buffers to set size */
+ tracing_update_buffers();
+
buffers_allocated = 1;
+
+ /*
+ * trace_printk_init_buffers() can be called by modules.
+ * If that happens, then we need to start cmdline recording
+ * directly here. If the global_trace.buffer is already
+ * allocated here, then this was called by module code.
+ */
+ if (global_trace.buffer)
+ tracing_start_cmdline_record();
+}
+
+void trace_printk_start_comm(void)
+{
+ /* Start tracing comms if trace printk is set */
+ if (!buffers_allocated)
+ return;
+ tracing_start_cmdline_record();
+}
+
+static void trace_printk_start_stop_comm(int enabled)
+{
+ if (!buffers_allocated)
+ return;
+
+ if (enabled)
+ tracing_start_cmdline_record();
+ else
+ tracing_stop_cmdline_record();
}
/**
@@ -1622,7 +1675,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!filter_check_discard(call, entry, buffer, event)) {
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
@@ -1693,7 +1746,7 @@ int trace_array_vprintk(struct trace_array *tr,
memcpy(&entry->buf, tbuffer, len);
entry->buf[len] = '\0';
if (!filter_check_discard(call, entry, buffer, event)) {
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
out:
@@ -2426,6 +2479,10 @@ __tracing_open(struct inode *inode, struct file *file)
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
+ /* Output in nanoseconds only if we are using a clock in nanoseconds. */
+ if (trace_clocks[trace_clock_id].in_ns)
+ iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
/* stop the trace while dumping */
tracing_stop();
@@ -2794,26 +2851,19 @@ static void set_tracer_flags(unsigned int mask, int enabled)
if (mask == TRACE_ITER_OVERWRITE)
ring_buffer_change_overwrite(global_trace.buffer, enabled);
+
+ if (mask == TRACE_ITER_PRINTK)
+ trace_printk_start_stop_comm(enabled);
}
-static ssize_t
-tracing_trace_options_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int trace_set_options(char *option)
{
- char buf[64];
char *cmp;
int neg = 0;
- int ret;
+ int ret = 0;
int i;
- if (cnt >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(&buf, ubuf, cnt))
- return -EFAULT;
-
- buf[cnt] = 0;
- cmp = strstrip(buf);
+ cmp = strstrip(option);
if (strncmp(cmp, "no", 2) == 0) {
neg = 1;
@@ -2832,10 +2882,25 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
mutex_lock(&trace_types_lock);
ret = set_tracer_option(current_trace, cmp, neg);
mutex_unlock(&trace_types_lock);
- if (ret)
- return ret;
}
+ return ret;
+}
+
+static ssize_t
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ trace_set_options(buf);
+
*ppos += cnt;
return cnt;
@@ -2940,56 +3005,6 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
};
static ssize_t
-tracing_ctrl_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- char buf[64];
- int r;
-
- r = sprintf(buf, "%u\n", tracer_enabled);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-tracing_ctrl_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct trace_array *tr = filp->private_data;
- unsigned long val;
- int ret;
-
- ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
- if (ret)
- return ret;
-
- val = !!val;
-
- mutex_lock(&trace_types_lock);
- if (tracer_enabled ^ val) {
-
- /* Only need to warn if this is used to change the state */
- WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
-
- if (val) {
- tracer_enabled = 1;
- if (current_trace->start)
- current_trace->start(tr);
- tracing_start();
- } else {
- tracer_enabled = 0;
- tracing_stop();
- if (current_trace->stop)
- current_trace->stop(tr);
- }
- }
- mutex_unlock(&trace_types_lock);
-
- *ppos += cnt;
-
- return cnt;
-}
-
-static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -3030,6 +3045,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
*/
ring_buffer_expanded = 1;
+ /* May be called before buffers are initialized */
+ if (!global_trace.buffer)
+ return 0;
+
ret = ring_buffer_resize(global_trace.buffer, size, cpu);
if (ret < 0)
return ret;
@@ -3325,6 +3344,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ /* Output in nanoseconds only if we are using a clock in nanoseconds. */
+ if (trace_clocks[trace_clock_id].in_ns)
+ iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
+
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
mutex_init(&iter->mutex);
@@ -3385,19 +3408,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
}
}
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
- DEFINE_WAIT(wait);
-
- prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
- if (trace_empty(iter))
- schedule();
-
- finish_wait(&trace_wait, &wait);
-}
-
/*
* This is a make-shift waitqueue.
* A tracer might use this callback on some rare cases:
@@ -3438,7 +3448,7 @@ static int tracing_wait_pipe(struct file *filp)
return -EINTR;
/*
- * We block until we read something and tracing is disabled.
+ * We block until we read something and tracing is enabled.
* We still block if tracing is disabled, but we have never
* read anything. This allows a user to cat this file, and
* then enable tracing. But after we have read something,
@@ -3446,7 +3456,7 @@ static int tracing_wait_pipe(struct file *filp)
*
* iter->pos will be 0 if we haven't read anything.
*/
- if (!tracer_enabled && iter->pos)
+ if (tracing_is_enabled() && iter->pos)
break;
}
@@ -3955,7 +3965,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
} else
entry->buf[cnt] = '\0';
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
written = cnt;
@@ -4016,6 +4026,14 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
if (max_tr.buffer)
ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
+ /*
+ * New clock may not be consistent with the previous clock.
+ * Reset the buffer so that it doesn't have incomparable timestamps.
+ */
+ tracing_reset_online_cpus(&global_trace);
+ if (max_tr.buffer)
+ tracing_reset_online_cpus(&max_tr);
+
mutex_unlock(&trace_types_lock);
*fpos += cnt;
@@ -4037,13 +4055,6 @@ static const struct file_operations tracing_max_lat_fops = {
.llseek = generic_file_llseek,
};
-static const struct file_operations tracing_ctrl_fops = {
- .open = tracing_open_generic,
- .read = tracing_ctrl_read,
- .write = tracing_ctrl_write,
- .llseek = generic_file_llseek,
-};
-
static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
.read = tracing_set_trace_read,
@@ -4377,13 +4388,27 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
- t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
- usec_rem = do_div(t, USEC_PER_SEC);
- trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
+ if (trace_clocks[trace_clock_id].in_ns) {
+ /* local or global for trace_clock */
+ t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
+ usec_rem = do_div(t, USEC_PER_SEC);
+ trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
+ t, usec_rem);
+
+ t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
+ usec_rem = do_div(t, USEC_PER_SEC);
+ trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+ } else {
+ /* counter or tsc mode for trace_clock */
+ trace_seq_printf(s, "oldest event ts: %llu\n",
+ ring_buffer_oldest_event_ts(tr->buffer, cpu));
+
+ trace_seq_printf(s, "now ts: %llu\n",
+ ring_buffer_time_stamp(tr->buffer, cpu));
+ }
- t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
- usec_rem = do_div(t, USEC_PER_SEC);
- trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
+ cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+ trace_seq_printf(s, "dropped events: %ld\n", cnt);
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
@@ -4815,9 +4840,6 @@ static __init int tracer_init_debugfs(void)
d_tracer = tracing_init_dentry();
- trace_create_file("tracing_enabled", 0644, d_tracer,
- &global_trace, &tracing_ctrl_fops);
-
trace_create_file("trace_options", 0644, d_tracer,
NULL, &tracing_iter_fops);
@@ -5089,6 +5111,7 @@ __init static int tracer_alloc_buffers(void)
/* Only allocate trace_printk buffers if a trace_printk exists */
if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+ /* Must be called before global_trace.buffer is allocated */
trace_printk_init_buffers();
/* To save memory, keep the ring buffer size to its minimum */
@@ -5136,6 +5159,7 @@ __init static int tracer_alloc_buffers(void)
#endif
trace_init_cmdlines();
+ init_irq_work(&trace_work_wakeup, trace_wake_up);
register_tracer(&nop_trace);
current_trace = &nop_trace;
@@ -5147,6 +5171,13 @@ __init static int tracer_alloc_buffers(void)
register_die_notifier(&trace_die_notifier);
+ while (trace_boot_options) {
+ char *option;
+
+ option = strsep(&trace_boot_options, ",");
+ trace_set_options(option);
+ }
+
return 0;
out_free_cpumask:
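For illustration only, not part of the patch: the hunks above move the reader wake-up from a delayed work to an irq_work, so a wake-up can be requested from contexts where taking the waitqueue lock directly could deadlock. A stripped-down sketch of the same pattern, with invented demo_* names:

#include <linux/irq_work.h>
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static bool demo_wakeup_needed;
static struct irq_work demo_wakeup_work;

/* Runs later from a safe interrupt context, so the waitqueue lock is
 * never taken from the producer's (possibly unsafe) context. */
static void demo_wake(struct irq_work *work)
{
	wake_up_all(&demo_wait);
}

/* Producer: may run where calling wake_up_all() directly could deadlock. */
static void demo_event(void)
{
	if (demo_wakeup_needed) {
		demo_wakeup_needed = false;
		irq_work_queue(&demo_wakeup_work);  /* supplies its own barriers */
	}
}

/* Consumer: set the flag before sleeping so the next event wakes us. */
static void demo_wait_for_event(bool (*empty)(void))
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&demo_wait, &wait, TASK_INTERRUPTIBLE);
	demo_wakeup_needed = true;
	if (empty())
		schedule();
	finish_wait(&demo_wait, &wait);
}

/* In init code: init_irq_work(&demo_wakeup_work, demo_wake); */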
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c15f528c1af4..c75d7988902c 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -285,8 +285,8 @@ struct tracer {
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
struct tracer_flags *flags;
- int print_max;
- int use_max_tr;
+ bool print_max;
+ bool use_max_tr;
};
@@ -327,7 +327,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
-void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
@@ -349,9 +348,6 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
unsigned long len,
unsigned long flags,
int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
- struct ring_buffer_event *event,
- unsigned long flags, int pc);
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_array_cpu *data);
@@ -359,6 +355,9 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
+void __buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+
int trace_empty(struct trace_iterator *iter);
void *trace_find_next_entry_inc(struct trace_iterator *iter);
@@ -367,7 +366,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
-void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
void ftrace(struct trace_array *tr,
@@ -407,12 +405,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
-void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);
-enum trace_file_type {
- TRACE_FILE_LAT_FMT = 1,
- TRACE_FILE_ANNOTATE = 2,
-};
extern cpumask_var_t __read_mostly tracing_buffer_mask;
@@ -841,6 +834,7 @@ extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
void trace_printk_init_buffers(void);
+void trace_printk_start_comm(void);
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 8d3538b4ea5f..95e96842ed29 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -77,7 +77,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
entry->correct = val == expect;
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
out:
atomic_dec(&tr->data[cpu]->disabled);
@@ -199,7 +199,7 @@ __init static int init_branch_tracer(void)
}
return register_tracer(&branch_trace);
}
-device_initcall(init_branch_tracer);
+core_initcall(init_branch_tracer);
#else
static inline
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d608d09d08c0..880073d0b946 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -491,19 +491,6 @@ static void t_stop(struct seq_file *m, void *p)
mutex_unlock(&event_mutex);
}
-static int
-ftrace_event_seq_open(struct inode *inode, struct file *file)
-{
- const struct seq_operations *seq_ops;
-
- if ((file->f_mode & FMODE_WRITE) &&
- (file->f_flags & O_TRUNC))
- ftrace_clear_events();
-
- seq_ops = inode->i_private;
- return seq_open(file, seq_ops);
-}
-
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -980,6 +967,9 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return r;
}
+static int ftrace_event_avail_open(struct inode *inode, struct file *file);
+static int ftrace_event_set_open(struct inode *inode, struct file *file);
+
static const struct seq_operations show_event_seq_ops = {
.start = t_start,
.next = t_next,
@@ -995,14 +985,14 @@ static const struct seq_operations show_set_event_seq_ops = {
};
static const struct file_operations ftrace_avail_fops = {
- .open = ftrace_event_seq_open,
+ .open = ftrace_event_avail_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct file_operations ftrace_set_event_fops = {
- .open = ftrace_event_seq_open,
+ .open = ftrace_event_set_open,
.read = seq_read,
.write = ftrace_event_write,
.llseek = seq_lseek,
@@ -1078,6 +1068,26 @@ static struct dentry *event_trace_events_dir(void)
return d_events;
}
+static int
+ftrace_event_avail_open(struct inode *inode, struct file *file)
+{
+ const struct seq_operations *seq_ops = &show_event_seq_ops;
+
+ return seq_open(file, seq_ops);
+}
+
+static int
+ftrace_event_set_open(struct inode *inode, struct file *file)
+{
+ const struct seq_operations *seq_ops = &show_set_event_seq_ops;
+
+ if ((file->f_mode & FMODE_WRITE) &&
+ (file->f_flags & O_TRUNC))
+ ftrace_clear_events();
+
+ return seq_open(file, seq_ops);
+}
+
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
@@ -1489,6 +1499,9 @@ static __init int event_trace_enable(void)
if (ret)
pr_warn("Failed to enable trace event: %s\n", token);
}
+
+ trace_printk_start_comm();
+
return 0;
}
@@ -1505,15 +1518,13 @@ static __init int event_trace_init(void)
return 0;
entry = debugfs_create_file("available_events", 0444, d_tracer,
- (void *)&show_event_seq_ops,
- &ftrace_avail_fops);
+ NULL, &ftrace_avail_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'available_events' entry\n");
entry = debugfs_create_file("set_event", 0644, d_tracer,
- (void *)&show_set_event_seq_ops,
- &ftrace_set_event_fops);
+ NULL, &ftrace_set_event_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'set_event' entry\n");
@@ -1749,7 +1760,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
entry->ip = ip;
entry->parent_ip = parent_ip;
- trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
out:
atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index c154797a7ff7..e5b0ca8b8d4d 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1000,9 +1000,9 @@ static int init_pred(struct filter_parse_state *ps,
}
} else {
if (field->is_signed)
- ret = strict_strtoll(pred->regex.pattern, 0, &val);
+ ret = kstrtoll(pred->regex.pattern, 0, &val);
else
- ret = strict_strtoull(pred->regex.pattern, 0, &val);
+ ret = kstrtoull(pred->regex.pattern, 0, &val);
if (ret) {
parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
return -EINVAL;
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 507a7a9630bf..bb227e380cb5 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -366,7 +366,7 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
* We use the callback data field (which is a pointer)
* as our counter.
*/
- ret = strict_strtoul(number, 0, (unsigned long *)&count);
+ ret = kstrtoul(number, 0, (unsigned long *)&count);
if (ret)
return ret;
@@ -411,5 +411,4 @@ static __init int init_function_trace(void)
init_func_cmd_traceon();
return register_tracer(&function_trace);
}
-device_initcall(init_function_trace);
-
+core_initcall(init_function_trace);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 99b4378393d5..4edb4b74eb7e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -223,7 +223,7 @@ int __trace_graph_entry(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->graph_ent = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
return 1;
}
@@ -327,7 +327,7 @@ void __trace_graph_return(struct trace_array *tr,
entry = ring_buffer_event_data(event);
entry->ret = *trace;
if (!filter_current_check_discard(buffer, call, entry, event))
- ring_buffer_unlock_commit(buffer, event);
+ __buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -1474,4 +1474,4 @@ static __init int init_graph_trace(void)
return register_tracer(&graph_trace);
}
-device_initcall(init_graph_trace);
+core_initcall(init_graph_trace);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d98ee8283b29..5ffce7b0f33c 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
@@ -698,4 +698,4 @@ __init static int init_irqsoff_tracer(void)
return 0;
}
-device_initcall(init_irqsoff_tracer);
+core_initcall(init_irqsoff_tracer);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 1a2117043bb1..1865d5f76538 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -444,7 +444,7 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
/* an address specified */
- ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
+ ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
if (ret) {
pr_info("Failed to parse address.\n");
return ret;
@@ -751,8 +751,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit_regs(buffer, event,
- irq_flags, pc, regs);
+ trace_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Kretprobe handler */
@@ -784,8 +784,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
if (!filter_current_check_discard(buffer, call, entry, event))
- trace_nowake_buffer_unlock_commit_regs(buffer, event,
- irq_flags, pc, regs);
+ trace_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
}
/* Event entry printers */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 123b189c732c..194d79602dc7 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -610,24 +610,54 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
return trace_print_lat_fmt(s, entry);
}
-static unsigned long preempt_mark_thresh = 100;
+static unsigned long preempt_mark_thresh_us = 100;
static int
-lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
- unsigned long rel_usecs)
+lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
- return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
- rel_usecs > preempt_mark_thresh ? '!' :
- rel_usecs > 1 ? '+' : ' ');
+ unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
+ unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
+ unsigned long long abs_ts = iter->ts - iter->tr->time_start;
+ unsigned long long rel_ts = next_ts - iter->ts;
+ struct trace_seq *s = &iter->seq;
+
+ if (in_ns) {
+ abs_ts = ns2usecs(abs_ts);
+ rel_ts = ns2usecs(rel_ts);
+ }
+
+ if (verbose && in_ns) {
+ unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
+ unsigned long abs_msec = (unsigned long)abs_ts;
+ unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
+ unsigned long rel_msec = (unsigned long)rel_ts;
+
+ return trace_seq_printf(
+ s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
+ ns2usecs(iter->ts),
+ abs_msec, abs_usec,
+ rel_msec, rel_usec);
+ } else if (verbose && !in_ns) {
+ return trace_seq_printf(
+ s, "[%016llx] %lld (+%lld): ",
+ iter->ts, abs_ts, rel_ts);
+ } else if (!verbose && in_ns) {
+ return trace_seq_printf(
+ s, " %4lldus%c: ",
+ abs_ts,
+ rel_ts > preempt_mark_thresh_us ? '!' :
+ rel_ts > 1 ? '+' : ' ');
+ } else { /* !verbose && !in_ns */
+ return trace_seq_printf(s, " %4lld: ", abs_ts);
+ }
}
int trace_print_context(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
- unsigned long long t = ns2usecs(iter->ts);
- unsigned long usec_rem = do_div(t, USEC_PER_SEC);
- unsigned long secs = (unsigned long)t;
+ unsigned long long t;
+ unsigned long secs, usec_rem;
char comm[TASK_COMM_LEN];
int ret;
@@ -644,8 +674,13 @@ int trace_print_context(struct trace_iterator *iter)
return 0;
}
- return trace_seq_printf(s, " %5lu.%06lu: ",
- secs, usec_rem);
+ if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
+ t = ns2usecs(iter->ts);
+ usec_rem = do_div(t, USEC_PER_SEC);
+ secs = (unsigned long)t;
+ return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
+ } else
+ return trace_seq_printf(s, " %12llu: ", iter->ts);
}
int trace_print_lat_context(struct trace_iterator *iter)
@@ -659,36 +694,29 @@ int trace_print_lat_context(struct trace_iterator *iter)
*next_entry = trace_find_next_entry(iter, NULL,
&next_ts);
unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
- unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
- unsigned long rel_usecs;
/* Restore the original ent_size */
iter->ent_size = ent_size;
if (!next_entry)
next_ts = iter->ts;
- rel_usecs = ns2usecs(next_ts - iter->ts);
if (verbose) {
char comm[TASK_COMM_LEN];
trace_find_cmdline(entry->pid, comm);
- ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
- " %ld.%03ldms (+%ld.%03ldms): ", comm,
- entry->pid, iter->cpu, entry->flags,
- entry->preempt_count, iter->idx,
- ns2usecs(iter->ts),
- abs_usecs / USEC_PER_MSEC,
- abs_usecs % USEC_PER_MSEC,
- rel_usecs / USEC_PER_MSEC,
- rel_usecs % USEC_PER_MSEC);
+ ret = trace_seq_printf(
+ s, "%16s %5d %3d %d %08x %08lx ",
+ comm, entry->pid, iter->cpu, entry->flags,
+ entry->preempt_count, iter->idx);
} else {
ret = lat_print_generic(s, entry, iter->cpu);
- if (ret)
- ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
}
+ if (ret)
+ ret = lat_print_timestamp(iter, next_ts);
+
return ret;
}
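For illustration only, not part of the patch: the verbose branch above splits a microsecond count with do_div(), which divides a u64 in place and returns the remainder. A compressed sketch with an invented name:

#include <linux/kernel.h>	/* pr_info() */
#include <linux/time.h>		/* USEC_PER_MSEC */
#include <asm/div64.h>		/* do_div() */

/* Illustrative only: 12345 us prints as "12.345ms". */
static void demo_print_ms(u64 usecs)
{
	unsigned long rem = do_div(usecs, USEC_PER_MSEC);  /* rem = 345, usecs = 12 */

	pr_info("%lu.%03lums\n", (unsigned long)usecs, rem);
}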
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index daa9980153af..412e959709b4 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -441,7 +441,7 @@ static const struct fetch_type *find_fetch_type(const char *type)
goto fail;
type++;
- if (strict_strtoul(type, 0, &bs))
+ if (kstrtoul(type, 0, &bs))
goto fail;
switch (bs) {
@@ -501,8 +501,8 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
tmp = strchr(symbol, '+');
if (tmp) {
- /* skip sign because strict_strtol doesn't accept '+' */
- ret = strict_strtoul(tmp + 1, 0, offset);
+ /* skip sign because kstrtoul doesn't accept '+' */
+ ret = kstrtoul(tmp + 1, 0, offset);
if (ret)
return ret;
@@ -533,7 +533,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
else
ret = -EINVAL;
} else if (isdigit(arg[5])) {
- ret = strict_strtoul(arg + 5, 10, &param);
+ ret = kstrtoul(arg + 5, 10, &param);
if (ret || param > PARAM_MAX_STACK)
ret = -EINVAL;
else {
@@ -579,7 +579,7 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
case '@': /* memory or symbol */
if (isdigit(arg[1])) {
- ret = strict_strtoul(arg + 1, 0, &param);
+ ret = kstrtoul(arg + 1, 0, &param);
if (ret)
break;
@@ -597,14 +597,14 @@ static int parse_probe_arg(char *arg, const struct fetch_type *t,
break;
case '+': /* deref memory */
- arg++; /* Skip '+', because strict_strtol() rejects it. */
+ arg++; /* Skip '+', because kstrtol() rejects it. */
case '-':
tmp = strchr(arg, '(');
if (!tmp)
break;
*tmp = '\0';
- ret = strict_strtol(arg, 0, &offset);
+ ret = kstrtol(arg, 0, &offset);
if (ret)
break;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 7e62c0a18456..3374c792ccd8 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -102,9 +102,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
entry->next_cpu = task_cpu(wakee);
if (!filter_check_discard(call, entry, buffer, event))
- ring_buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr->buffer, flags, 6, pc);
- ftrace_trace_userstack(tr->buffer, flags, pc);
+ trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 02170c00c413..bc64fc137554 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
__init static int init_wakeup_tracer(void)
@@ -637,4 +637,4 @@ __init static int init_wakeup_tracer(void)
return 0;
}
-device_initcall(init_wakeup_tracer);
+core_initcall(init_wakeup_tracer);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 2c00a691a540..47623169a815 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -320,7 +320,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
int (*func)(void))
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
unsigned long count;
char *func_name;
int ret;
@@ -331,7 +330,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* passed in by parameter to fool gcc from optimizing */
func();
@@ -395,7 +393,6 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
/* Enable tracing on all functions again */
ftrace_set_global_filter(NULL, 0, 1);
@@ -452,7 +449,6 @@ static int
trace_selftest_function_recursion(void)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@@ -465,7 +461,6 @@ trace_selftest_function_recursion(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -534,7 +529,6 @@ trace_selftest_function_recursion(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
return ret;
}
@@ -569,7 +563,6 @@ static int
trace_selftest_function_regs(void)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
@@ -586,7 +579,6 @@ trace_selftest_function_regs(void)
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
- tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
@@ -648,7 +640,6 @@ trace_selftest_function_regs(void)
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
return ret;
}
@@ -662,7 +653,6 @@ int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
int save_ftrace_enabled = ftrace_enabled;
- int save_tracer_enabled = tracer_enabled;
unsigned long count;
int ret;
@@ -671,7 +661,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* start the tracing */
ftrace_enabled = 1;
- tracer_enabled = 1;
ret = tracer_init(trace, tr);
if (ret) {
@@ -708,7 +697,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_function_regs();
out:
ftrace_enabled = save_ftrace_enabled;
- tracer_enabled = save_tracer_enabled;
/* kill ftrace totally if we failed */
if (ret)
@@ -1106,6 +1094,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(tr, NULL);
+ printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&max_tr, &count);
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 2485a7d09b11..7609dd6714c2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -21,9 +21,6 @@ static int syscall_enter_register(struct ftrace_event_call *event,
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type, void *data);
-static int syscall_enter_define_fields(struct ftrace_event_call *call);
-static int syscall_exit_define_fields(struct ftrace_event_call *call);
-
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
@@ -32,30 +29,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
return &entry->enter_fields;
}
-struct trace_event_functions enter_syscall_print_funcs = {
- .trace = print_syscall_enter,
-};
-
-struct trace_event_functions exit_syscall_print_funcs = {
- .trace = print_syscall_exit,
-};
-
-struct ftrace_event_class event_class_syscall_enter = {
- .system = "syscalls",
- .reg = syscall_enter_register,
- .define_fields = syscall_enter_define_fields,
- .get_fields = syscall_get_enter_fields,
- .raw_init = init_syscall_trace,
-};
-
-struct ftrace_event_class event_class_syscall_exit = {
- .system = "syscalls",
- .reg = syscall_exit_register,
- .define_fields = syscall_exit_define_fields,
- .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
- .raw_init = init_syscall_trace,
-};
-
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
@@ -432,7 +405,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
-int init_syscall_trace(struct ftrace_event_call *call)
+static int init_syscall_trace(struct ftrace_event_call *call)
{
int id;
int num;
@@ -457,6 +430,30 @@ int init_syscall_trace(struct ftrace_event_call *call)
return id;
}
+struct trace_event_functions enter_syscall_print_funcs = {
+ .trace = print_syscall_enter,
+};
+
+struct trace_event_functions exit_syscall_print_funcs = {
+ .trace = print_syscall_exit,
+};
+
+struct ftrace_event_class event_class_syscall_enter = {
+ .system = "syscalls",
+ .reg = syscall_enter_register,
+ .define_fields = syscall_enter_define_fields,
+ .get_fields = syscall_get_enter_fields,
+ .raw_init = init_syscall_trace,
+};
+
+struct ftrace_event_class event_class_syscall_exit = {
+ .system = "syscalls",
+ .reg = syscall_exit_register,
+ .define_fields = syscall_exit_define_fields,
+ .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
+ .raw_init = init_syscall_trace,
+};
+
unsigned long __init __weak arch_syscall_addr(int nr)
{
return (unsigned long)sys_call_table[nr];
@@ -537,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
-int perf_sysenter_enable(struct ftrace_event_call *call)
+static int perf_sysenter_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@@ -558,7 +555,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
return ret;
}
-void perf_sysenter_disable(struct ftrace_event_call *call)
+static void perf_sysenter_disable(struct ftrace_event_call *call)
{
int num;
@@ -615,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
-int perf_sysexit_enable(struct ftrace_event_call *call)
+static int perf_sysexit_enable(struct ftrace_event_call *call)
{
int ret = 0;
int num;
@@ -636,7 +633,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
return ret;
}
-void perf_sysexit_disable(struct ftrace_event_call *call)
+static void perf_sysexit_disable(struct ftrace_event_call *call)
{
int num;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 03003cd7dd96..9614db8b0f8c 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -189,7 +189,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (argv[0][0] == '-')
is_delete = true;
else if (argv[0][0] != 'p') {
- pr_info("Probe definition must be started with 'p', 'r' or" " '-'.\n");
+ pr_info("Probe definition must be started with 'p' or '-'.\n");
return -EINVAL;
}
@@ -252,7 +252,7 @@ static int create_trace_uprobe(int argc, char **argv)
if (ret)
goto fail_address_parse;
- ret = strict_strtoul(arg, 0, &offset);
+ ret = kstrtoul(arg, 0, &offset);
if (ret)
goto fail_address_parse;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9d4c8d5a1f53..c8c21be11ab4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,7 +116,7 @@ static unsigned long get_timestamp(int this_cpu)
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
-static unsigned long get_sample_period(void)
+static u64 get_sample_period(void)
{
/*
* convert watchdog_thresh from seconds to ns
@@ -125,7 +125,7 @@ static unsigned long get_sample_period(void)
* and hard thresholds) to increment before the
* hardlockup detector generates a warning
*/
- return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
+ return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
/* Commands for resetting the watchdog */
@@ -368,6 +368,9 @@ static void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ if (!watchdog_enabled)
+ return;
+
watchdog_set_prio(SCHED_NORMAL, 0);
hrtimer_cancel(hrtimer);
/* disable the perf event */
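For illustration only, not part of the patch: the (u64) cast above matters on 32-bit, where get_softlockup_thresh() * (NSEC_PER_SEC / 5) is an unsigned long multiply; a 60 second threshold already needs 12,000,000,000 ns, which does not fit in 32 bits. A sketch with an invented name:

#include <linux/kernel.h>
#include <linux/time.h>		/* NSEC_PER_SEC */

/* Illustrative only. */
static u64 demo_sample_period(unsigned long thresh_secs)	/* e.g. 60 */
{
	/* thresh_secs * (NSEC_PER_SEC / 5) would wrap on 32-bit;
	 * promoting one operand forces a 64-bit multiply. */
	return thresh_secs * ((u64)NSEC_PER_SEC / 5);	/* 12000000000 for 60 s */
}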
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 042d221d33cc..1dae900df798 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1361,8 +1361,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
timer->data != (unsigned long)dwork);
- BUG_ON(timer_pending(timer));
- BUG_ON(!list_empty(&work->entry));
+ WARN_ON_ONCE(timer_pending(timer));
+ WARN_ON_ONCE(!list_empty(&work->entry));
+
+ /*
+ * If @delay is 0, queue @dwork->work immediately. This is for
+ * both optimization and correctness. The earliest @timer can
+ * expire is on the closest next tick, and delayed_work users depend
+ * on there being no such delay when @delay is 0.
+ */
+ if (!delay) {
+ __queue_work(cpu, wq, &dwork->work);
+ return;
+ }
timer_stats_timer_set_start_info(&dwork->timer);
@@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
bool ret = false;
unsigned long flags;
- if (!delay)
- return queue_work_on(cpu, wq, &dwork->work);
-
/* read the comment in __queue_work() */
local_irq_save(flags);
@@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq)
repeat:
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
return 0;
+ }
/*
* See whether any cpu is asking for help. Unbounded
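For illustration only, not part of the patch: with the __queue_delayed_work() change above, a zero delay bypasses the timer entirely, so delayed work queued with delay 0 runs as soon as a worker picks it up rather than after the next tick. A sketch with invented names:

#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
	/* ... */
}
static DECLARE_DELAYED_WORK(demo_dwork, demo_fn);

static void demo_kick(void)
{
	/* delay == 0 now queues demo_fn immediately via __queue_work()
	 * instead of waiting for the timer to fire on the next tick. */
	queue_delayed_work(system_wq, &demo_dwork, 0);
}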