Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/ftrace.h | 73
1 file changed, 0 insertions(+), 73 deletions(-)
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 698f2a890322..40dc5e8fe340 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -619,79 +619,6 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-/*
- * Define the insertion callback to perf events
- *
- * The job is very similar to ftrace_raw_event_<call> except that we don't
- * insert in the ring buffer but in a perf counter.
- *
- * static void ftrace_perf_<call>(proto)
- * {
- * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
- * struct ftrace_event_call *event_call = &event_<call>;
- * extern void perf_tp_event(int, u64, u64, void *, int);
- * struct ftrace_raw_##call *entry;
- * struct perf_trace_buf *trace_buf;
- * u64 __addr = 0, __count = 1;
- * unsigned long irq_flags;
- * struct trace_entry *ent;
- * int __entry_size;
- * int __data_size;
- * int __cpu
- * int pc;
- *
- * pc = preempt_count();
- *
- * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
- *
- * // Below we want to get the aligned size by taking into account
- * // the u32 field that will later store the buffer size
- * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
- * sizeof(u64));
- * __entry_size -= sizeof(u32);
- *
- * // Protect the non nmi buffer
- * // This also protects the rcu read side
- * local_irq_save(irq_flags);
- * __cpu = smp_processor_id();
- *
- * if (in_nmi())
- * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
- * else
- * trace_buf = rcu_dereference_sched(perf_trace_buf);
- *
- * if (!trace_buf)
- * goto end;
- *
- * trace_buf = per_cpu_ptr(trace_buf, __cpu);
- *
- * // Avoid recursion from perf that could mess up the buffer
- * if (trace_buf->recursion++)
- * goto end_recursion;
- *
- * raw_data = trace_buf->buf;
- *
- * // Make recursion update visible before entering perf_tp_event
- * // so that we protect from perf recursions.
- *
- * barrier();
- *
- * //zero dead bytes from alignment to avoid stack leak to userspace:
- * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
- * entry = (struct ftrace_raw_<call> *)raw_data;
- * ent = &entry->ent;
- * tracing_generic_entry_update(ent, irq_flags, pc);
- * ent->type = event_call->id;
- *
- * <tstruct> <- do some jobs with dynamic arrays
- *
- * <assign> <- affect our values
- *
- * perf_tp_event(event_call->id, __addr, __count, entry,
- * __entry_size); <- submit them to perf counter
- *
- * }
- */
#ifdef CONFIG_PERF_EVENTS
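The removed comment walks through the size calculation for a perf trace record: the dynamic payload plus the common entry header is padded up to a u64 boundary while reserving room for the u32 length word that perf stores in front of the data, and that u32 is then subtracted again. Below is a minimal userspace sketch of that arithmetic only; ALIGN(), trace_entry_stub and the payload size are local stand-ins for illustration, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's ALIGN() macro: round x up to a multiple of a. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Simplified stand-in for struct trace_entry (the common record header). */
struct trace_entry_stub {
	uint16_t type;
	uint8_t  flags;
	uint8_t  preempt_count;
	int32_t  pid;
};

int main(void)
{
	size_t data_size = 13;	/* hypothetical dynamic payload size */
	size_t entry_size;

	/* Pad (payload + header + u32 size word) up to a u64 boundary... */
	entry_size = ALIGN(data_size + sizeof(struct trace_entry_stub) + sizeof(uint32_t),
			   sizeof(uint64_t));
	/* ...then drop the u32 again; perf accounts for it when it writes the size. */
	entry_size -= sizeof(uint32_t);

	printf("data=%zu entry=%zu\n", data_size, entry_size);
	return 0;
}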
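The recursion counter plus barrier() in the removed pseudocode guards against the handler being re-entered on the same CPU (for instance from a perf NMI) while a record is still being built. The sketch below simulates that pattern in plain, single-threaded C; barrier(), trace_buf_stub and handle_event() are illustrative names, and one static buffer stands in for the real per-cpu allocation.

#include <stdio.h>

/* Compiler barrier, as used in the kernel; prevents reordering across it. */
#define barrier()	__asm__ __volatile__("" ::: "memory")

struct trace_buf_stub {
	int  recursion;
	char buf[256];
};

static struct trace_buf_stub cpu_buf;	/* one per CPU in the real code */

static void handle_event(int depth)
{
	/* A nested entry sees a non-zero counter and refuses to reuse the buffer. */
	if (cpu_buf.recursion++) {
		printf("depth %d: recursion detected, dropping event\n", depth);
		goto out;
	}

	/* Make the counter update visible before doing work that may re-enter us. */
	barrier();

	if (depth == 0)
		handle_event(1);	/* simulate a nested perf callback */

	printf("depth %d: event written\n", depth);
out:
	cpu_buf.recursion--;
}

int main(void)
{
	handle_event(0);
	return 0;
}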