Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/local64.h     |  96
-rw-r--r--  include/asm-generic/vmlinux.lds.h |   4
-rw-r--r--  include/linux/ftrace_event.h      |   6
-rw-r--r--  include/linux/kmemtrace.h         |  25
-rw-r--r--  include/linux/perf_event.h        |  95
-rw-r--r--  include/linux/slab_def.h          |   3
-rw-r--r--  include/linux/slub_def.h          |   3
-rw-r--r--  include/linux/syscalls.h          |   2
-rw-r--r--  include/trace/boot.h              |  60
-rw-r--r--  include/trace/events/sched.h      |  32
-rw-r--r--  include/trace/events/timer.h      |  80
-rw-r--r--  include/trace/ftrace.h            |   4
-rw-r--r--  include/trace/syscall.h           |   1

13 files changed, 193 insertions(+), 218 deletions(-)
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
new file mode 100644
index 000000000000..02ac760c1a8b
--- /dev/null
+++ b/include/asm-generic/local64.h
@@ -0,0 +1,96 @@
+#ifndef _ASM_GENERIC_LOCAL64_H
+#define _ASM_GENERIC_LOCAL64_H
+
+#include <linux/percpu.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic64_t. Which is
+ * rather pointless. The whole point behind local64_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local64_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+#if BITS_PER_LONG == 64
+
+#include <asm/local.h>
+
+typedef struct {
+	local_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ LOCAL_INIT(i) }
+
+#define local64_read(l)		local_read(&(l)->a)
+#define local64_set(l,i)	local_set((&(l)->a),(i))
+#define local64_inc(l)		local_inc(&(l)->a)
+#define local64_dec(l)		local_dec(&(l)->a)
+#define local64_add(i,l)	local_add((i),(&(l)->a))
+#define local64_sub(i,l)	local_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) local_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) local_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) local_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) local_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) local_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	local_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#else /* BITS_PER_LONG != 64 */
+
+#include <asm/atomic.h>
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct {
+	atomic64_t a;
+} local64_t;
+
+#define LOCAL64_INIT(i)	{ ATOMIC_LONG_INIT(i) }
+
+#define local64_read(l)		atomic64_read(&(l)->a)
+#define local64_set(l,i)	atomic64_set((&(l)->a),(i))
+#define local64_inc(l)		atomic64_inc(&(l)->a)
+#define local64_dec(l)		atomic64_dec(&(l)->a)
+#define local64_add(i,l)	atomic64_add((i),(&(l)->a))
+#define local64_sub(i,l)	atomic64_sub((i),(&(l)->a))
+
+#define local64_sub_and_test(i, l) atomic64_sub_and_test((i), (&(l)->a))
+#define local64_dec_and_test(l) atomic64_dec_and_test(&(l)->a)
+#define local64_inc_and_test(l) atomic64_inc_and_test(&(l)->a)
+#define local64_add_negative(i, l) atomic64_add_negative((i), (&(l)->a))
+#define local64_add_return(i, l) atomic64_add_return((i), (&(l)->a))
+#define local64_sub_return(i, l) atomic64_sub_return((i), (&(l)->a))
+#define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
+
+#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
+#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
+#define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, ie. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local64_inc(l)	local64_set((l), local64_read(l) + 1)
+#define __local64_dec(l)	local64_set((l), local64_read(l) - 1)
+#define __local64_add(i,l)	local64_set((l), local64_read(l) + (i))
+#define __local64_sub(i,l)	local64_set((l), local64_read(l) - (i))
+
+#endif /* BITS_PER_LONG != 64 */
+
+#endif /* _ASM_GENERIC_LOCAL64_H */
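The header comment states the intent; concretely, a local64_t is meant to back per-CPU statistics that are updated from IRQ and process context on the same CPU without a LOCK prefix. A minimal usage sketch (hypothetical counter, not part of this patch; get_cpu_var()/put_cpu_var() pin the update to one CPU):

	#include <linux/percpu.h>
	#include <asm/local64.h>

	static DEFINE_PER_CPU(local64_t, nr_hits);

	static void record_hit(void)
	{
		/* atomic wrt IRQs on this CPU; cheap on archs with local ops */
		local64_inc(&get_cpu_var(nr_hits));
		put_cpu_var(nr_hits);
	}

	static u64 total_hits(void)
	{
		u64 sum = 0;
		int cpu;

		/* cross-CPU reads are fine; only updates must stay CPU-local */
		for_each_possible_cpu(cpu)
			sum += local64_read(&per_cpu(nr_hits, cpu));
		return sum;
	}

perf uses this to replace atomic64_t in hw_perf_event (see the perf_event.h hunks below), since event counts are only ever updated from the owning CPU.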
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 48c5299cbf26..415b1a9118e7 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -150,10 +150,6 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
-	. = ALIGN(8);							\
-	VMLINUX_SYMBOL(__start___markers) = .;				\
-	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;				\
 	. = ALIGN(32);							\
 	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
 	*(__tracepoints)						\
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 3167f2df4126..01df7ca4ead7 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -146,6 +146,9 @@ struct ftrace_event_class {
 	int			(*raw_init)(struct ftrace_event_call *);
 };
 
+extern int ftrace_event_reg(struct ftrace_event_call *event,
+			    enum trace_reg type);
+
 enum {
 	TRACE_EVENT_FL_ENABLED_BIT,
 	TRACE_EVENT_FL_FILTERED_BIT,
@@ -257,8 +260,7 @@ static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
 		       u64 count, struct pt_regs *regs, void *head)
 {
-	perf_tp_event(addr, count, raw_data, size, regs, head);
-	perf_swevent_put_recursion_context(rctx);
+	perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
 }
 #endif
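The ftrace_event.h hunk folds the recursion-context release into perf_tp_event(): callers now hand rctx down instead of dropping it themselves. A sketch of the resulting caller pattern (illustrative only; modeled on the perf_trace_buf_prepare()/perf_trace_buf_submit() pairing used by include/trace/ftrace.h, with size/event_type/addr/count/head standing in for the caller's values):

	struct pt_regs __regs;
	int rctx;
	void *entry;

	perf_fetch_caller_regs(&__regs);
	entry = perf_trace_buf_prepare(size, event_type, &__regs, &rctx);
	if (!entry)
		return;
	/* ... fill in the trace entry ... */
	/* rctx travels into perf_tp_event(), which now releases the
	 * recursion context; the explicit
	 * perf_swevent_put_recursion_context() call in the caller is gone */
	perf_trace_buf_submit(entry, size, rctx, addr, count, &__regs, head);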
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
deleted file mode 100644
index b616d3930c3b..000000000000
--- a/include/linux/kmemtrace.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#ifndef _LINUX_KMEMTRACE_H
-#define _LINUX_KMEMTRACE_H
-
-#ifdef __KERNEL__
-
-#include <trace/events/kmem.h>
-
-#ifdef CONFIG_KMEMTRACE
-extern void kmemtrace_init(void);
-#else
-static inline void kmemtrace_init(void)
-{
-}
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_KMEMTRACE_H */
-
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5d0266d94985..937495c25073 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -214,8 +214,9 @@ struct perf_event_attr {
 				 *  See also PERF_RECORD_MISC_EXACT_IP
 				 */
 				precise_ip     :  2, /* skid constraint       */
+				mmap_data      :  1, /* non-exec mmap data    */
 
-				__reserved_1   : 47;
+				__reserved_1   : 46;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -461,6 +462,7 @@ enum perf_callchain_context {
 
 #ifdef CONFIG_PERF_EVENTS
 # include <asm/perf_event.h>
+# include <asm/local64.h>
 #endif
 
 struct perf_guest_info_callbacks {
@@ -531,14 +533,16 @@ struct hw_perf_event {
 			struct hrtimer	hrtimer;
 		};
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-		/* breakpoint */
-		struct arch_hw_breakpoint	info;
+		struct { /* breakpoint */
+			struct arch_hw_breakpoint	info;
+			struct list_head		bp_list;
+		};
 #endif
 	};
-	atomic64_t			prev_count;
+	local64_t			prev_count;
 	u64				sample_period;
 	u64				last_period;
-	atomic64_t			period_left;
+	local64_t			period_left;
 	u64				interrupts;
 
 	u64				freq_time_stamp;
@@ -548,7 +552,10 @@ struct hw_perf_event {
 
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -562,14 +569,28 @@ struct pmu {
 	void (*unthrottle)		(struct perf_event *event);
 
 	/*
-	 * group events scheduling is treated as a transaction,
-	 * add group events as a whole and perform one schedulability test.
-	 * If test fails, roll back the whole group
+	 * Group events scheduling is treated as a transaction, add group
+	 * events as a whole and perform one schedulability test. If the test
+	 * fails, roll back the whole group
 	 */
+
+	/*
+	 * Start the transaction, after this ->enable() doesn't need
+	 * to do schedulability tests.
+	 */
 	void (*start_txn)	(const struct pmu *pmu);
-	void (*cancel_txn)	(const struct pmu *pmu);
+
+	/*
+	 * If ->start_txn() disabled the ->enable() schedulability test
+	 * then ->commit_txn() is required to perform one. On success
+	 * the transaction is closed. On error the transaction is kept
+	 * open until ->cancel_txn() is called.
+	 */
 	int  (*commit_txn)	(const struct pmu *pmu);
+
+	/*
+	 * Will cancel the transaction, assumes ->disable() is called for
+	 * each successfull ->enable() during the transaction.
+	 */
+	void (*cancel_txn)	(const struct pmu *pmu);
 };
 
 /**
@@ -584,7 +605,9 @@ enum perf_event_active_state {
 
 struct file;
 
-struct perf_mmap_data {
+#define PERF_BUFFER_WRITABLE		0x01
+
+struct perf_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
@@ -650,7 +673,8 @@ struct perf_event {
 	enum perf_event_active_state	state;
 	unsigned int			attach_state;
-	atomic64_t			count;
+	local64_t			count;
+	atomic64_t			child_count;
 
 	/*
 	 * These are the total time in nanoseconds that the event
@@ -709,7 +733,7 @@ struct perf_event {
 	atomic_t			mmap_count;
 	int				mmap_locked;
 	struct user_struct		*mmap_user;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;
 
 	/* poll related */
 	wait_queue_head_t		waitq;
@@ -807,7 +831,7 @@ struct perf_cpu_context {
 
 struct perf_output_handle {
 	struct perf_event		*event;
-	struct perf_mmap_data		*data;
+	struct perf_buffer		*buffer;
 	unsigned long			wakeup;
 	unsigned long			size;
 	void				*addr;
@@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
 
-extern void
-perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
+#ifndef perf_arch_fetch_caller_regs
+static inline void
+perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
 
 /*
  * Take a snapshot of the regs. Skip ip and frame pointer to
@@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
  * - bp for callchains
  * - eflags, for future purposes, just in case
  */
-static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 {
-	unsigned long ip;
-
 	memset(regs, 0, sizeof(*regs));
 
-	switch (skip) {
-	case 1 :
-		ip = CALLER_ADDR0;
-		break;
-	case 2 :
-		ip = CALLER_ADDR1;
-		break;
-	case 3 :
-		ip = CALLER_ADDR2;
-		break;
-	case 4:
-		ip = CALLER_ADDR3;
-		break;
-	/* No need to support further for now */
-	default:
-		ip = 0;
-	}
-
-	return perf_arch_fetch_caller_regs(regs, ip, skip);
+	perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
 }
 
 static inline void
@@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
 		struct pt_regs hot_regs;
 
 		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs, 1);
+			perf_fetch_caller_regs(&hot_regs);
 			regs = &hot_regs;
 		}
 		__perf_sw_event(event_id, nr, nmi, regs, addr);
 	}
 }
 
-extern void __perf_event_mmap(struct vm_area_struct *vma);
-
-static inline void perf_event_mmap(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & VM_EXEC)
-		__perf_event_mmap(vma);
-}
-
+extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
 extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void)
 extern void perf_event_init(void);
 extern void perf_tp_event(u64 addr, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
-			  struct hlist_head *head);
+			  struct hlist_head *head, int rctx);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
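The expanded comments spell out the transaction contract; in code, the core is expected to drive it roughly like this (simplified sketch modeled on group_sched_in() in kernel/perf_event.c, not the verbatim implementation):

	static int sched_in_group(struct perf_event *leader, const struct pmu *pmu)
	{
		struct perf_event *event;

		/* after this, ->enable() may skip per-event schedulability tests */
		pmu->start_txn(pmu);

		if (leader->pmu->enable(leader))
			goto fail;

		list_for_each_entry(event, &leader->sibling_list, group_entry) {
			if (event->pmu->enable(event))
				goto fail;
		}

		/* one schedulability test for the whole group closes the txn */
		if (!pmu->commit_txn(pmu))
			return 0;
	fail:
		/* real code ->disable()s each enabled event before cancelling */
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}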
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 1812dac8c496..1acfa73ce2ac 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,8 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+
+#include <trace/events/kmem.h>
 
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4ba59cfc1f75..6447a723ecb1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,10 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemtrace.h>
 #include <linux/kmemleak.h>
 
+#include <trace/events/kmem.h>
+
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
 	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 13ebb5413a79..a6bfd1367d2a 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -167,7 +167,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.enter_event	= &event_enter_##sname,		\
 		.exit_event	= &event_exit_##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
-		.exit_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
 	};
 
 #define SYSCALL_DEFINE0(sname)					\
@@ -182,7 +181,6 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 		.enter_event	= &event_enter__##sname,	\
 		.exit_event	= &event_exit__##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
-		.exit_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
 	};							\
 	asmlinkage long sys_##sname(void)
 #else
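slab_def.h and slub_def.h stop pulling in the dead kmemtrace header and include the kmem tracepoints directly, because their inline fast paths emit those events. The shape of the pattern (sketch only; the slab_buffer_size() helper and parameter order follow the SLAB variant and may differ per allocator):

	#include <trace/events/kmem.h>

	static __always_inline void *
	kmalloc_traced(struct kmem_cache *cachep, size_t size, gfp_t flags)
	{
		void *ret = kmem_cache_alloc(cachep, flags);

		/* call site, returned pointer, requested vs. allocated size */
		trace_kmalloc(_RET_IP_, ret, size, slab_buffer_size(cachep), flags);
		return ret;
	}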
diff --git a/include/trace/boot.h b/include/trace/boot.h
deleted file mode 100644
index 088ea089e31d..000000000000
--- a/include/trace/boot.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _LINUX_TRACE_BOOT_H
-#define _LINUX_TRACE_BOOT_H
-
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/init.h>
-
-/*
- * Structure which defines the trace of an initcall
- * while it is called.
- * You don't have to fill the func field since it is
- * only used internally by the tracer.
- */
-struct boot_trace_call {
-	pid_t			caller;
-	char			func[KSYM_SYMBOL_LEN];
-};
-
-/*
- * Structure which defines the trace of an initcall
- * while it returns.
- */
-struct boot_trace_ret {
-	char			func[KSYM_SYMBOL_LEN];
-	int			result;
-	unsigned long long	duration;		/* nsecs */
-};
-
-#ifdef CONFIG_BOOT_TRACER
-/* Append the traces on the ring-buffer */
-extern void trace_boot_call(struct boot_trace_call *bt, initcall_t fn);
-extern void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn);
-
-/* Tells the tracer that smp_pre_initcall is finished.
- * So we can start the tracing
- */
-extern void start_boot_trace(void);
-
-/* Resume the tracing of other necessary events
- * such as sched switches
- */
-extern void enable_boot_trace(void);
-
-/* Suspend this tracing. Actually, only sched_switches tracing have
- * to be suspended. Initcalls doesn't need it.)
- */
-extern void disable_boot_trace(void);
-#else
-static inline
-void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) { }
-
-static inline
-void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) { }
-
-static inline void start_boot_trace(void) { }
-static inline void enable_boot_trace(void) { }
-static inline void disable_boot_trace(void) { }
-#endif /* CONFIG_BOOT_TRACER */
-
-#endif /* __LINUX_TRACE_BOOT_H */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index b9e1dd6c6208..9208c92aeab5 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -50,31 +50,6 @@ TRACE_EVENT(sched_kthread_stop_ret,
 );
 
 /*
- * Tracepoint for waiting on task to unschedule:
- */
-TRACE_EVENT(sched_wait_task,
-
-	TP_PROTO(struct task_struct *p),
-
-	TP_ARGS(p),
-
-	TP_STRUCT__entry(
-		__array(	char,	comm,	TASK_COMM_LEN	)
-		__field(	pid_t,	pid			)
-		__field(	int,	prio			)
-	),
-
-	TP_fast_assign(
-		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-		__entry->pid	= p->pid;
-		__entry->prio	= p->prio;
-	),
-
-	TP_printk("comm=%s pid=%d prio=%d",
-		  __entry->comm, __entry->pid, __entry->prio)
-);
-
-/*
  * Tracepoint for waking up a task:
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
@@ -240,6 +215,13 @@ DEFINE_EVENT(sched_process_template, sched_process_exit,
 	     TP_ARGS(p));
 
 /*
+ * Tracepoint for waiting on task to unschedule:
+ */
+DEFINE_EVENT(sched_process_template, sched_wait_task,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
+
+/*
  * Tracepoint for a waiting task:
  */
 TRACE_EVENT(sched_process_wait,
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 9496b965d62a..c624126a9c8a 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -8,11 +8,7 @@
 #include <linux/hrtimer.h>
 #include <linux/timer.h>
 
-/**
- * timer_init - called when the timer is initialized
- * @timer:	pointer to struct timer_list
- */
-TRACE_EVENT(timer_init,
+DECLARE_EVENT_CLASS(timer_class,
 
 	TP_PROTO(struct timer_list *timer),
 
@@ -30,6 +26,17 @@ TRACE_EVENT(timer_init,
 );
 
 /**
+ * timer_init - called when the timer is initialized
+ * @timer:	pointer to struct timer_list
+ */
+DEFINE_EVENT(timer_class, timer_init,
+
+	TP_PROTO(struct timer_list *timer),
+
+	TP_ARGS(timer)
+);
+
+/**
  * timer_start - called when the timer is started
  * @timer:	pointer to struct timer_list
  * @expires:	the timers expiry time
@@ -94,42 +101,22 @@ TRACE_EVENT(timer_expire_entry,
  * NOTE: Do NOT derefernce timer in TP_fast_assign. The pointer might
  * be invalid. We solely track the pointer.
 */
-TRACE_EVENT(timer_expire_exit,
+DEFINE_EVENT(timer_class, timer_expire_exit,
 
 	TP_PROTO(struct timer_list *timer),
 
-	TP_ARGS(timer),
-
-	TP_STRUCT__entry(
-		__field(void *,	timer	)
-	),
-
-	TP_fast_assign(
-		__entry->timer	= timer;
-	),
-
-	TP_printk("timer=%p", __entry->timer)
+	TP_ARGS(timer)
 );
 
 /**
  * timer_cancel - called when the timer is canceled
  * @timer:	pointer to struct timer_list
 */
-TRACE_EVENT(timer_cancel,
+DEFINE_EVENT(timer_class, timer_cancel,
 
	TP_PROTO(struct timer_list *timer),
 
-	TP_ARGS(timer),
-
-	TP_STRUCT__entry(
-		__field(	void *,	timer	)
-	),
-
-	TP_fast_assign(
-		__entry->timer	= timer;
-	),
-
-	TP_printk("timer=%p", __entry->timer)
+	TP_ARGS(timer)
 );
 
 /**
@@ -224,14 +211,7 @@ TRACE_EVENT(hrtimer_expire_entry,
 		  (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now }))
 );
 
-/**
- * hrtimer_expire_exit - called immediately after the hrtimer callback returns
- * @timer:	pointer to struct hrtimer
- *
- * When used in combination with the hrtimer_expire_entry tracepoint we can
- * determine the runtime of the callback function.
- */
-TRACE_EVENT(hrtimer_expire_exit,
+DECLARE_EVENT_CLASS(hrtimer_class,
 
 	TP_PROTO(struct hrtimer *hrtimer),
 
@@ -249,24 +229,28 @@ TRACE_EVENT(hrtimer_expire_exit,
 );
 
 /**
- * hrtimer_cancel - called when the hrtimer is canceled
- * @hrtimer:	pointer to struct hrtimer
+ * hrtimer_expire_exit - called immediately after the hrtimer callback returns
+ * @timer:	pointer to struct hrtimer
+ *
+ * When used in combination with the hrtimer_expire_entry tracepoint we can
+ * determine the runtime of the callback function.
  */
-TRACE_EVENT(hrtimer_cancel,
+DEFINE_EVENT(hrtimer_class, hrtimer_expire_exit,
 
 	TP_PROTO(struct hrtimer *hrtimer),
 
-	TP_ARGS(hrtimer),
+	TP_ARGS(hrtimer)
+);
 
-	TP_STRUCT__entry(
-		__field( void *,	hrtimer	)
-	),
+/**
+ * hrtimer_cancel - called when the hrtimer is canceled
+ * @hrtimer:	pointer to struct hrtimer
+ */
+DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
 
-	TP_fast_assign(
-		__entry->hrtimer	= hrtimer;
-	),
+	TP_PROTO(struct hrtimer *hrtimer),
 
-	TP_printk("hrtimer=%p", __entry->hrtimer)
+	TP_ARGS(hrtimer)
 );
 
 /**
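The sched.h and timer.h changes are the same consolidation: one DECLARE_EVENT_CLASS holds the shared TP_STRUCT__entry/TP_fast_assign/TP_printk, and each DEFINE_EVENT stamps out a tracepoint that reuses them. Any further event with the same signature then costs only a few lines; a hypothetical addition against the new timer_class would look like:

	DEFINE_EVENT(timer_class, timer_example,	/* hypothetical event */

		TP_PROTO(struct timer_list *timer),

		TP_ARGS(timer)
	);

The per-event boilerplate deleted above is regenerated from the class, so the trace output format of every converted event is unchanged.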
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 5a64905d7278..55c1fd1bbc3d 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -439,6 +439,7 @@ static inline notrace int ftrace_get_offsets_##call(
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
+ *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call __used
@@ -567,6 +568,7 @@ static struct ftrace_event_class __used event_class_##call = {		\
 	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
 	.raw_init		= trace_event_raw_init,			\
 	.probe			= ftrace_raw_event_##call,		\
+	.reg			= ftrace_event_reg,			\
 	_TRACE_PERF_INIT(call)						\
 };
 
@@ -705,7 +707,7 @@ perf_trace_##call(void *__data, proto)					\
 	int __data_size;						\
 	int rctx;							\
 									\
-	perf_fetch_caller_regs(&__regs, 1);				\
+	perf_fetch_caller_regs(&__regs);				\
 									\
 	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 257e08960d7b..31966a4fb8cc 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -26,7 +26,6 @@ struct syscall_metadata {
 	const char	**types;
 	const char	**args;
 	struct list_head enter_fields;
-	struct list_head exit_fields;
 
 	struct ftrace_event_call *enter_event;
 	struct ftrace_event_call *exit_event;
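ftrace.h now calls perf_fetch_caller_regs() without the skip argument, relying on the new #ifndef perf_arch_fetch_caller_regs default in perf_event.h. An architecture that can do better overrides the hook by defining the macro before the generic header tests it; sketched after the x86 arrangement (illustrative, not the exact code):

	/* arch/<arch>/include/asm/perf_event.h */
	extern void perf_arch_fetch_caller_regs(struct pt_regs *regs,
						unsigned long ip);
	#define perf_arch_fetch_caller_regs perf_arch_fetch_caller_regs

On architectures without an override, the empty static-inline default leaves the regs zeroed by memset() in perf_fetch_caller_regs(), so a sampled software event simply records no meaningful regs rather than failing to build.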