Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                 |   1
-rw-r--r--  kernel/acct.c                   |   6
-rw-r--r--  kernel/futex.c                  |   2
-rw-r--r--  kernel/perf_counter.c           | 320
-rw-r--r--  kernel/resource.c               |   2
-rw-r--r--  kernel/sysctl.c                 |  13
-rw-r--r--  kernel/time/timer_stats.c       |  16
-rw-r--r--  kernel/timer.c                  |   2
-rw-r--r--  kernel/trace/ftrace.c           |  56
-rw-r--r--  kernel/trace/ring_buffer.c      |  11
-rw-r--r--  kernel/trace/trace.c            |  23
-rw-r--r--  kernel/trace/trace.h            |   7
-rw-r--r--  kernel/trace/trace_events.c     |  28
-rw-r--r--  kernel/trace/trace_functions.c  |   3
-rw-r--r--  kernel/trace/trace_printk.c     |  26
-rw-r--r--  kernel/trace/trace_stat.c       |   6
16 files changed, 386 insertions(+), 136 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 780c8dcf4516..2093a691f1c2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_FUNCTION_TRACER) += trace/
obj-$(CONFIG_TRACING) += trace/
obj-$(CONFIG_X86_DS) += trace/
+obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 7afa31564162..9f3391090b3e 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -215,6 +215,7 @@ static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
static int acct_on(char *name)
{
struct file *file;
+ struct vfsmount *mnt;
int error;
struct pid_namespace *ns;
struct bsd_acct_struct *acct = NULL;
@@ -256,11 +257,12 @@ static int acct_on(char *name)
acct = NULL;
}
- mnt_pin(file->f_path.mnt);
+ mnt = file->f_path.mnt;
+ mnt_pin(mnt);
acct_file_reopen(ns->bacct, file, ns);
spin_unlock(&acct_lock);
- mntput(file->f_path.mnt); /* it's pinned, now give up active reference */
+ mntput(mnt); /* it's pinned, now give up active reference */
kfree(acct);
return 0;
diff --git a/kernel/futex.c b/kernel/futex.c
index 1c337112335c..794c862125fe 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -299,7 +299,7 @@ void put_futex_key(int fshared, union futex_key *key)
static int fault_in_user_writeable(u32 __user *uaddr)
{
int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
- sizeof(*uaddr), 1, 0, NULL, NULL);
+ 1, 1, 0, NULL, NULL);
return ret < 0 ? ret : 0;
}
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 1a933a221ea4..d55a50da2347 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -236,6 +236,8 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
list_add_rcu(&counter->event_entry, &ctx->event_list);
ctx->nr_counters++;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat++;
}
/*
@@ -250,6 +252,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
if (list_empty(&counter->list_entry))
return;
ctx->nr_counters--;
+ if (counter->attr.inherit_stat)
+ ctx->nr_stat--;
list_del_init(&counter->list_entry);
list_del_rcu(&counter->event_entry);
@@ -1006,6 +1010,81 @@ static int context_equiv(struct perf_counter_context *ctx1,
&& !ctx1->pin_count && !ctx2->pin_count;
}
+static void __perf_counter_read(void *counter);
+
+static void __perf_counter_sync_stat(struct perf_counter *counter,
+ struct perf_counter *next_counter)
+{
+ u64 value;
+
+ if (!counter->attr.inherit_stat)
+ return;
+
+ /*
+ * Update the counter value, we cannot use perf_counter_read()
+ * because we're in the middle of a context switch and have IRQs
+ * disabled, which upsets smp_call_function_single(), however
+ * we know the counter must be on the current CPU, therefore we
+ * don't need to use it.
+ */
+ switch (counter->state) {
+ case PERF_COUNTER_STATE_ACTIVE:
+ __perf_counter_read(counter);
+ break;
+
+ case PERF_COUNTER_STATE_INACTIVE:
+ update_counter_times(counter);
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * In order to keep per-task stats reliable we need to flip the counter
+ * values when we flip the contexts.
+ */
+ value = atomic64_read(&next_counter->count);
+ value = atomic64_xchg(&counter->count, value);
+ atomic64_set(&next_counter->count, value);
+
+ swap(counter->total_time_enabled, next_counter->total_time_enabled);
+ swap(counter->total_time_running, next_counter->total_time_running);
+
+ /*
+ * Since we swizzled the values, update the user visible data too.
+ */
+ perf_counter_update_userpage(counter);
+ perf_counter_update_userpage(next_counter);
+}
+
+#define list_next_entry(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+
+static void perf_counter_sync_stat(struct perf_counter_context *ctx,
+ struct perf_counter_context *next_ctx)
+{
+ struct perf_counter *counter, *next_counter;
+
+ if (!ctx->nr_stat)
+ return;
+
+ counter = list_first_entry(&ctx->event_list,
+ struct perf_counter, event_entry);
+
+ next_counter = list_first_entry(&next_ctx->event_list,
+ struct perf_counter, event_entry);
+
+ while (&counter->event_entry != &ctx->event_list &&
+ &next_counter->event_entry != &next_ctx->event_list) {
+
+ __perf_counter_sync_stat(counter, next_counter);
+
+ counter = list_next_entry(counter, event_entry);
+ next_counter = list_next_entry(next_counter, event_entry);
+ }
+}
+
/*
* Called from scheduler to remove the counters of the current task,
* with interrupts disabled.
@@ -1061,6 +1140,8 @@ void perf_counter_task_sched_out(struct task_struct *task,
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
+
+ perf_counter_sync_stat(ctx, next_ctx);
}
spin_unlock(&next_ctx->lock);
spin_unlock(&ctx->lock);
@@ -1348,9 +1429,56 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
}
/*
+ * Enable all of a task's counters that have been marked enable-on-exec.
+ * This expects task == current.
+ */
+static void perf_counter_enable_on_exec(struct task_struct *task)
+{
+ struct perf_counter_context *ctx;
+ struct perf_counter *counter;
+ unsigned long flags;
+ int enabled = 0;
+
+ local_irq_save(flags);
+ ctx = task->perf_counter_ctxp;
+ if (!ctx || !ctx->nr_counters)
+ goto out;
+
+ __perf_counter_task_sched_out(ctx);
+
+ spin_lock(&ctx->lock);
+
+ list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+ if (!counter->attr.enable_on_exec)
+ continue;
+ counter->attr.enable_on_exec = 0;
+ if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
+ continue;
+ counter->state = PERF_COUNTER_STATE_INACTIVE;
+ counter->tstamp_enabled =
+ ctx->time - counter->total_time_enabled;
+ enabled = 1;
+ }
+
+ /*
+ * Unclone this context if we enabled any counter.
+ */
+ if (enabled && ctx->parent_ctx) {
+ put_ctx(ctx->parent_ctx);
+ ctx->parent_ctx = NULL;
+ }
+
+ spin_unlock(&ctx->lock);
+
+ perf_counter_task_sched_in(task, smp_processor_id());
+ out:
+ local_irq_restore(flags);
+}
+
+/*
* Cross CPU call to read the hardware counter
*/
-static void __read(void *info)
+static void __perf_counter_read(void *info)
{
struct perf_counter *counter = info;
struct perf_counter_context *ctx = counter->ctx;
@@ -1372,7 +1500,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
*/
if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
smp_call_function_single(counter->oncpu,
- __read, counter, 1);
+ __perf_counter_read, counter, 1);
} else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
update_counter_times(counter);
}
@@ -1508,11 +1636,13 @@ static void free_counter(struct perf_counter *counter)
{
perf_pending_sync(counter);
- atomic_dec(&nr_counters);
- if (counter->attr.mmap)
- atomic_dec(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_dec(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_dec(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_dec(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_dec(&nr_comm_counters);
+ }
if (counter->destroy)
counter->destroy(counter);
@@ -1751,6 +1881,14 @@ int perf_counter_task_disable(void)
return 0;
}
+static int perf_counter_index(struct perf_counter *counter)
+{
+ if (counter->state != PERF_COUNTER_STATE_ACTIVE)
+ return 0;
+
+ return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
+}
+
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
@@ -1775,11 +1913,17 @@ void perf_counter_update_userpage(struct perf_counter *counter)
preempt_disable();
++userpg->lock;
barrier();
- userpg->index = counter->hw.idx;
+ userpg->index = perf_counter_index(counter);
userpg->offset = atomic64_read(&counter->count);
if (counter->state == PERF_COUNTER_STATE_ACTIVE)
userpg->offset -= atomic64_read(&counter->hw.prev_count);
+ userpg->time_enabled = counter->total_time_enabled +
+ atomic64_read(&counter->child_total_time_enabled);
+
+ userpg->time_running = counter->total_time_running +
+ atomic64_read(&counter->child_total_time_running);
+
barrier();
++userpg->lock;
preempt_enable();
@@ -2483,15 +2627,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
u32 cpu, reserved;
} cpu_entry;
- header.type = 0;
+ header.type = PERF_EVENT_SAMPLE;
header.size = sizeof(header);
- header.misc = PERF_EVENT_MISC_OVERFLOW;
+ header.misc = 0;
header.misc |= perf_misc_flags(data->regs);
if (sample_type & PERF_SAMPLE_IP) {
ip = perf_instruction_pointer(data->regs);
- header.type |= PERF_SAMPLE_IP;
header.size += sizeof(ip);
}
@@ -2500,7 +2643,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
tid_entry.pid = perf_counter_pid(counter, current);
tid_entry.tid = perf_counter_tid(counter, current);
- header.type |= PERF_SAMPLE_TID;
header.size += sizeof(tid_entry);
}
@@ -2510,34 +2652,25 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
*/
time = sched_clock();
- header.type |= PERF_SAMPLE_TIME;
header.size += sizeof(u64);
}
- if (sample_type & PERF_SAMPLE_ADDR) {
- header.type |= PERF_SAMPLE_ADDR;
+ if (sample_type & PERF_SAMPLE_ADDR)
header.size += sizeof(u64);
- }
- if (sample_type & PERF_SAMPLE_ID) {
- header.type |= PERF_SAMPLE_ID;
+ if (sample_type & PERF_SAMPLE_ID)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_CPU) {
- header.type |= PERF_SAMPLE_CPU;
header.size += sizeof(cpu_entry);
cpu_entry.cpu = raw_smp_processor_id();
}
- if (sample_type & PERF_SAMPLE_PERIOD) {
- header.type |= PERF_SAMPLE_PERIOD;
+ if (sample_type & PERF_SAMPLE_PERIOD)
header.size += sizeof(u64);
- }
if (sample_type & PERF_SAMPLE_GROUP) {
- header.type |= PERF_SAMPLE_GROUP;
header.size += sizeof(u64) +
counter->nr_siblings * sizeof(group_entry);
}
@@ -2547,10 +2680,9 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
if (callchain) {
callchain_size = (1 + callchain->nr) * sizeof(u64);
-
- header.type |= PERF_SAMPLE_CALLCHAIN;
header.size += callchain_size;
- }
+ } else
+ header.size += sizeof(u64);
}
ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2601,13 +2733,79 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
}
}
- if (callchain)
- perf_output_copy(&handle, callchain, callchain_size);
+ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
+ if (callchain)
+ perf_output_copy(&handle, callchain, callchain_size);
+ else {
+ u64 nr = 0;
+ perf_output_put(&handle, nr);
+ }
+ }
perf_output_end(&handle);
}
/*
+ * read event
+ */
+
+struct perf_read_event {
+ struct perf_event_header header;
+
+ u32 pid;
+ u32 tid;
+ u64 value;
+ u64 format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+ struct task_struct *task)
+{
+ struct perf_output_handle handle;
+ struct perf_read_event event = {
+ .header = {
+ .type = PERF_EVENT_READ,
+ .misc = 0,
+ .size = sizeof(event) - sizeof(event.format),
+ },
+ .pid = perf_counter_pid(counter, task),
+ .tid = perf_counter_tid(counter, task),
+ .value = atomic64_read(&counter->count),
+ };
+ int ret, i = 0;
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_enabled;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ event.header.size += sizeof(u64);
+ event.format[i++] = counter->total_time_running;
+ }
+
+ if (counter->attr.read_format & PERF_FORMAT_ID) {
+ u64 id;
+
+ event.header.size += sizeof(u64);
+ if (counter->parent)
+ id = counter->parent->id;
+ else
+ id = counter->id;
+
+ event.format[i++] = id;
+ }
+
+ ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+ if (ret)
+ return;
+
+ perf_output_copy(&handle, &event, event.header.size);
+ perf_output_end(&handle);
+}
+
+/*
* fork tracking
*/
@@ -2798,6 +2996,9 @@ void perf_counter_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
+ if (task->perf_counter_ctxp)
+ perf_counter_enable_on_exec(task);
+
if (!atomic_read(&nr_comm_counters))
return;
@@ -3317,8 +3518,8 @@ out:
put_cpu_var(perf_cpu_context);
}
-void
-perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+void __perf_swcounter_event(u32 event, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr)
{
struct perf_sample_data data = {
.regs = regs,
@@ -3509,9 +3710,21 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
}
#endif
+atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
+
+static void sw_perf_counter_destroy(struct perf_counter *counter)
+{
+ u64 event = counter->attr.config;
+
+ WARN_ON(counter->parent);
+
+ atomic_dec(&perf_swcounter_enabled[event]);
+}
+
static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
{
const struct pmu *pmu = NULL;
+ u64 event = counter->attr.config;
/*
* Software counters (currently) can't in general distinguish
@@ -3520,7 +3733,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (counter->attr.config) {
+ switch (event) {
case PERF_COUNT_SW_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
@@ -3541,6 +3754,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
case PERF_COUNT_SW_CONTEXT_SWITCHES:
case PERF_COUNT_SW_CPU_MIGRATIONS:
+ if (!counter->parent) {
+ atomic_inc(&perf_swcounter_enabled[event]);
+ counter->destroy = sw_perf_counter_destroy;
+ }
pmu = &perf_ops_generic;
break;
}
@@ -3556,6 +3773,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
int cpu,
struct perf_counter_context *ctx,
struct perf_counter *group_leader,
+ struct perf_counter *parent_counter,
gfp_t gfpflags)
{
const struct pmu *pmu;
@@ -3591,6 +3809,8 @@ perf_counter_alloc(struct perf_counter_attr *attr,
counter->ctx = ctx;
counter->oncpu = -1;
+ counter->parent = parent_counter;
+
counter->ns = get_pid_ns(current->nsproxy->pid_ns);
counter->id = atomic64_inc_return(&perf_counter_id);
@@ -3648,11 +3868,13 @@ done:
counter->pmu = pmu;
- atomic_inc(&nr_counters);
- if (counter->attr.mmap)
- atomic_inc(&nr_mmap_counters);
- if (counter->attr.comm)
- atomic_inc(&nr_comm_counters);
+ if (!counter->parent) {
+ atomic_inc(&nr_counters);
+ if (counter->attr.mmap)
+ atomic_inc(&nr_mmap_counters);
+ if (counter->attr.comm)
+ atomic_inc(&nr_comm_counters);
+ }
return counter;
}
@@ -3815,7 +4037,7 @@ SYSCALL_DEFINE5(perf_counter_open,
}
counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
- GFP_KERNEL);
+ NULL, GFP_KERNEL);
ret = PTR_ERR(counter);
if (IS_ERR(counter))
goto err_put_context;
@@ -3881,7 +4103,8 @@ inherit_counter(struct perf_counter *parent_counter,
child_counter = perf_counter_alloc(&parent_counter->attr,
parent_counter->cpu, child_ctx,
- group_leader, GFP_KERNEL);
+ group_leader, parent_counter,
+ GFP_KERNEL);
if (IS_ERR(child_counter))
return child_counter;
get_ctx(child_ctx);
@@ -3904,12 +4127,6 @@ inherit_counter(struct perf_counter *parent_counter,
*/
add_counter_to_ctx(child_counter, child_ctx);
- child_counter->parent = parent_counter;
- /*
- * inherit into child's child as well:
- */
- child_counter->attr.inherit = 1;
-
/*
* Get a reference to the parent filp - we will fput it
* when the child counter exits. This is safe to do because
@@ -3953,10 +4170,14 @@ static int inherit_group(struct perf_counter *parent_counter,
}
static void sync_child_counter(struct perf_counter *child_counter,
- struct perf_counter *parent_counter)
+ struct task_struct *child)
{
+ struct perf_counter *parent_counter = child_counter->parent;
u64 child_val;
+ if (child_counter->attr.inherit_stat)
+ perf_counter_read_event(child_counter, child);
+
child_val = atomic64_read(&child_counter->count);
/*
@@ -3985,7 +4206,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
static void
__perf_counter_exit_task(struct perf_counter *child_counter,
- struct perf_counter_context *child_ctx)
+ struct perf_counter_context *child_ctx,
+ struct task_struct *child)
{
struct perf_counter *parent_counter;
@@ -3999,7 +4221,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
* counters need to be zapped - but otherwise linger.
*/
if (parent_counter) {
- sync_child_counter(child_counter, parent_counter);
+ sync_child_counter(child_counter, child);
free_counter(child_counter);
}
}
@@ -4061,7 +4283,7 @@ void perf_counter_exit_task(struct task_struct *child)
again:
list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
list_entry)
- __perf_counter_exit_task(child_counter, child_ctx);
+ __perf_counter_exit_task(child_counter, child_ctx, child);
/*
* If the last counter was a group counter, it will have appended all
diff --git a/kernel/resource.c b/kernel/resource.c
index ac5f3a36923f..78b087221c15 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -787,7 +787,7 @@ static int __init reserve_setup(char *str)
static struct resource reserve[MAXRESERVE];
for (;;) {
- int io_start, io_num;
+ unsigned int io_start, io_num;
int x = reserved;
if (get_option (&str, &io_start) != 2)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 62e4ff9968b5..98e02328c67d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -335,7 +335,10 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_timer_migration,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
},
#endif
{
@@ -744,6 +747,14 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "panic_on_io_nmi",
+ .data = &panic_on_io_nmi,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = KERN_BOOTLOADER_TYPE,
.procname = "bootloader_type",
.data = &bootloader_type,
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index c994530d166d..4cde8b9c716f 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -96,7 +96,7 @@ static DEFINE_MUTEX(show_mutex);
/*
* Collection status, active/inactive:
*/
-static int __read_mostly active;
+int __read_mostly timer_stats_active;
/*
* Beginning/end timestamps of measurement:
@@ -242,7 +242,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
struct entry *entry, input;
unsigned long flags;
- if (likely(!active))
+ if (likely(!timer_stats_active))
return;
lock = &per_cpu(lookup_lock, raw_smp_processor_id());
@@ -254,7 +254,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
input.timer_flag = timer_flag;
spin_lock_irqsave(lock, flags);
- if (!active)
+ if (!timer_stats_active)
goto out_unlock;
entry = tstat_lookup(&input, comm);
@@ -290,7 +290,7 @@ static int tstats_show(struct seq_file *m, void *v)
/*
* If still active then calculate up to now:
*/
- if (active)
+ if (timer_stats_active)
time_stop = ktime_get();
time = ktime_sub(time_stop, time_start);
@@ -368,18 +368,18 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
mutex_lock(&show_mutex);
switch (ctl[0]) {
case '0':
- if (active) {
- active = 0;
+ if (timer_stats_active) {
+ timer_stats_active = 0;
time_stop = ktime_get();
sync_access();
}
break;
case '1':
- if (!active) {
+ if (!timer_stats_active) {
reset_entries();
time_start = ktime_get();
smp_mb();
- active = 1;
+ timer_stats_active = 1;
}
break;
default:
diff --git a/kernel/timer.c b/kernel/timer.c
index 54d3912f8cad..0b36b9e5cc8b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -380,6 +380,8 @@ static void timer_stats_account_timer(struct timer_list *timer)
{
unsigned int flag = 0;
+ if (likely(!timer->start_site))
+ return;
if (unlikely(tbase_get_deferrable(timer->base)))
flag |= TIMER_STATS_FLAG_DEFERRABLE;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3718d55fb4c3..f3716bf04df6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -291,7 +291,9 @@ function_stat_next(void *v, int idx)
pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
again:
- rec++;
+ if (idx != 0)
+ rec++;
+
if ((void *)rec >= (void *)&pg->records[pg->index]) {
pg = pg->next;
if (!pg)
@@ -1417,10 +1419,20 @@ static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
+
+ if (!(iter->flags & FTRACE_ITER_HASH))
+ *pos = 0;
iter->flags |= FTRACE_ITER_HASH;
- return t_hash_next(m, p, pos);
+ iter->hidx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_hash_next(m, p, &l);
+ if (!p)
+ break;
+ }
+ return p;
}
static int t_hash_show(struct seq_file *m, void *v)
@@ -1467,8 +1479,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
iter->pg = iter->pg->next;
iter->idx = 0;
goto retry;
- } else {
- iter->idx = -1;
}
} else {
rec = &iter->pg->records[iter->idx++];
@@ -1497,6 +1507,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
void *p = NULL;
+ loff_t l;
mutex_lock(&ftrace_lock);
/*
@@ -1508,23 +1519,21 @@ static void *t_start(struct seq_file *m, loff_t *pos)
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
- (*pos)++;
return iter;
}
if (iter->flags & FTRACE_ITER_HASH)
return t_hash_start(m, pos);
- if (*pos > 0) {
- if (iter->idx < 0)
- return p;
- (*pos)--;
- iter->idx--;
+ iter->pg = ftrace_pages_start;
+ iter->idx = 0;
+ for (l = 0; l <= *pos; ) {
+ p = t_next(m, p, &l);
+ if (!p)
+ break;
}
- p = t_next(m, p, pos);
-
- if (!p)
+ if (!p && iter->flags & FTRACE_ITER_FILTER)
return t_hash_start(m, pos);
return p;
@@ -2500,32 +2509,31 @@ int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
static void *
-g_next(struct seq_file *m, void *v, loff_t *pos)
+__g_next(struct seq_file *m, loff_t *pos)
{
unsigned long *array = m->private;
- int index = *pos;
- (*pos)++;
-
- if (index >= ftrace_graph_count)
+ if (*pos >= ftrace_graph_count)
return NULL;
+ return &array[*pos];
+}
- return &array[index];
+static void *
+g_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return __g_next(m, pos);
}
static void *g_start(struct seq_file *m, loff_t *pos)
{
- void *p = NULL;
-
mutex_lock(&graph_lock);
/* Nothing, tell g_show to print all functions are enabled */
if (!ftrace_graph_count && !*pos)
return (void *)1;
- p = g_next(m, p, pos);
-
- return p;
+ return __g_next(m, pos);
}
static void g_stop(struct seq_file *m, void *p)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 04dac2638258..bf27bb7a63e2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1563,6 +1563,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
return NULL;
}
+#ifdef CONFIG_TRACING
+
#define TRACE_RECURSIVE_DEPTH 16
static int trace_recursive_lock(void)
@@ -1593,6 +1595,13 @@ static void trace_recursive_unlock(void)
current->trace_recursion--;
}
+#else
+
+#define trace_recursive_lock() (0)
+#define trace_recursive_unlock() do { } while (0)
+
+#endif
+
static DEFINE_PER_CPU(int, rb_need_resched);
/**
@@ -3104,6 +3113,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
+#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
@@ -3171,6 +3181,7 @@ static __init int rb_init_debugfs(void)
}
fs_initcall(rb_init_debugfs);
+#endif
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 076fa6f0ee48..3aa0a0dfdfa8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -284,13 +284,12 @@ void trace_wake_up(void)
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
- int ret;
if (!str)
return 0;
- ret = strict_strtoul(str, 0, &buf_size);
+ buf_size = memparse(str, &str);
/* nr_entries can not be zero */
- if (ret < 0 || buf_size == 0)
+ if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
@@ -2053,25 +2052,23 @@ static int tracing_open(struct inode *inode, struct file *file)
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t = v;
(*pos)++;
if (t)
t = t->next;
- m->private = t;
-
return t;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
- struct tracer *t = m->private;
+ struct tracer *t;
loff_t l = 0;
mutex_lock(&trace_types_lock);
- for (; t && l < *pos; t = t_next(m, t, &l))
+ for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
;
return t;
@@ -2107,18 +2104,10 @@ static struct seq_operations show_traces_seq_ops = {
static int show_traces_open(struct inode *inode, struct file *file)
{
- int ret;
-
if (tracing_disabled)
return -ENODEV;
- ret = seq_open(file, &show_traces_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = trace_types;
- }
-
- return ret;
+ return seq_open(file, &show_traces_seq_ops);
}
static ssize_t
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6e735d4771f8..3548ae5cc780 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -597,6 +597,7 @@ print_graph_function(struct trace_iterator *iter)
extern struct pid *ftrace_pid_trace;
+#ifdef CONFIG_FUNCTION_TRACER
static inline int ftrace_trace_task(struct task_struct *task)
{
if (!ftrace_pid_trace)
@@ -604,6 +605,12 @@ static inline int ftrace_trace_task(struct task_struct *task)
return test_tsk_trace_trace(task);
}
+#else
+static inline int ftrace_trace_task(struct task_struct *task)
+{
+ return 1;
+}
+#endif
/*
* trace_iterator_flags is an enumeration that defines bit
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be69a1b6..53c8fd376a88 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -300,10 +300,18 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return t_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = t_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static void *
@@ -332,10 +340,18 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
static void *s_start(struct seq_file *m, loff_t *pos)
{
+ struct ftrace_event_call *call = NULL;
+ loff_t l;
+
mutex_lock(&event_mutex);
- if (*pos == 0)
- m->private = ftrace_events.next;
- return s_next(m, NULL, pos);
+
+ m->private = ftrace_events.next;
+ for (l = 0; l <= *pos; ) {
+ call = s_next(m, NULL, &l);
+ if (!call)
+ break;
+ }
+ return call;
}
static int t_show(struct seq_file *m, void *v)
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 90f134764837..7402144bff21 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -302,8 +302,7 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
if (count == -1)
seq_printf(m, ":unlimited\n");
else
- seq_printf(m, ":count=%ld", count);
- seq_putc(m, '\n');
+ seq_printf(m, ":count=%ld\n", count);
return 0;
}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 9bece9687b62..7b6278110827 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -155,25 +155,19 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
EXPORT_SYMBOL_GPL(__ftrace_vprintk);
static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_start(struct seq_file *m, loff_t *pos)
{
- const char **fmt = m->private;
- const char **next = fmt;
-
- (*pos)++;
+ const char **fmt = __start___trace_bprintk_fmt + *pos;
if ((unsigned long)fmt >= (unsigned long)__stop___trace_bprintk_fmt)
return NULL;
-
- next = fmt;
- m->private = ++next;
-
return fmt;
}
-static void *t_start(struct seq_file *m, loff_t *pos)
+static void *t_next(struct seq_file *m, void * v, loff_t *pos)
{
- return t_next(m, NULL, pos);
+ (*pos)++;
+ return t_start(m, pos);
}
static int t_show(struct seq_file *m, void *v)
@@ -224,15 +218,7 @@ static const struct seq_operations show_format_seq_ops = {
static int
ftrace_formats_open(struct inode *inode, struct file *file)
{
- int ret;
-
- ret = seq_open(file, &show_format_seq_ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
-
- m->private = __start___trace_bprintk_fmt;
- }
- return ret;
+ return seq_open(file, &show_format_seq_ops);
}
static const struct file_operations ftrace_formats_fops = {
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index c00643733f4c..e66f5e493342 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -199,17 +199,13 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
mutex_lock(&session->stat_mutex);
/* If we are in the beginning of the file, print the headers */
- if (!*pos && session->ts->stat_headers) {
- (*pos)++;
+ if (!*pos && session->ts->stat_headers)
return SEQ_START_TOKEN;
- }
node = rb_first(&session->stat_root);
for (i = 0; node && i < *pos; i++)
node = rb_next(node);
- (*pos)++;
-
return node;
}