Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h |  50
1 file changed, 45 insertions(+), 5 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index dda5b0a3ff60..614615b8d42b 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -225,8 +225,14 @@ struct perf_event_attr {
 	};
 
 	__u32			bp_type;
-	__u64			bp_addr;
-	__u64			bp_len;
+	union {
+		__u64		bp_addr;
+		__u64		config1; /* extension of config */
+	};
+	union {
+		__u64		bp_len;
+		__u64		config2; /* extension of config1 */
+	};
 };
 
 /*
@@ -464,6 +470,7 @@ enum perf_callchain_context {
 
 #define PERF_FLAG_FD_NO_GROUP	(1U << 0)
 #define PERF_FLAG_FD_OUTPUT	(1U << 1)
+#define PERF_FLAG_PID_CGROUP	(1U << 2) /* pid=cgroup id, per-cpu mode only */
 
 #ifdef __KERNEL__
 /*
@@ -471,6 +478,7 @@ enum perf_callchain_context {
  */
 
 #ifdef CONFIG_PERF_EVENTS
+# include <linux/cgroup.h>
 # include <asm/perf_event.h>
 # include <asm/local64.h>
 #endif
@@ -539,6 +547,9 @@ struct hw_perf_event {
 			unsigned long	event_base;
 			int		idx;
 			int		last_cpu;
+			unsigned int	extra_reg;
+			u64		extra_config;
+			int		extra_alloc;
 		};
 		struct { /* software */
 			struct hrtimer	hrtimer;
@@ -716,6 +727,22 @@ struct swevent_hlist {
 #define PERF_ATTACH_GROUP	0x02
 #define PERF_ATTACH_TASK	0x04
 
+#ifdef CONFIG_CGROUP_PERF
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+	u64 time;
+	u64 timestamp;
+};
+
+struct perf_cgroup {
+	struct cgroup_subsys_state css;
+	struct perf_cgroup_info *info;	/* timing info, one per cpu */
+};
+#endif
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -832,6 +859,11 @@ struct perf_event {
 	struct event_filter		*filter;
 #endif
 
+#ifdef CONFIG_CGROUP_PERF
+	struct perf_cgroup		*cgrp; /* cgroup event is attach to */
+	int				cgrp_defer_enabled;
+#endif
+
 #endif /* CONFIG_PERF_EVENTS */
 };
 
@@ -886,6 +918,7 @@ struct perf_event_context {
 	u64				generation;
 	int				pin_count;
 	struct rcu_head			rcu_head;
+	int				nr_cgroups; /* cgroup events present */
 };
 
 /*
@@ -905,6 +938,9 @@ struct perf_cpu_context {
 	struct list_head		rotation_list;
 	int				jiffies_interval;
 	struct pmu			*active_pmu;
+#ifdef CONFIG_CGROUP_PERF
+	struct perf_cgroup		*cgrp;
+#endif
 };
 
 struct perf_output_handle {
@@ -1040,11 +1076,11 @@ have_event:
 	__perf_sw_event(event_id, nr, nmi, regs, addr);
 }
 
-extern atomic_t perf_task_events;
+extern atomic_t perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *task)
 {
-	COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
+	COND_STMT(&perf_sched_events, __perf_event_task_sched_in(task));
 }
 
 static inline
@@ -1052,7 +1088,7 @@ void perf_event_task_sched_out(struct task_struct *task, struct task_struct *nex
 {
 	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
 
-	COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
+	COND_STMT(&perf_sched_events, __perf_event_task_sched_out(task, next));
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1083,6 +1119,10 @@ extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
 extern int sysctl_perf_event_sample_rate;
 
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos);
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
 	return sysctl_perf_event_paranoid > -1;
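
Note (not part of the commit): the new PERF_FLAG_PID_CGROUP flag lets perf_event_open() attach an event to a cgroup instead of a task. Below is a minimal user-space sketch of how the flag might be used. The cgroup path /sys/fs/cgroup/perf_event/mygroup and the event choice (CPU cycles on CPU 0) are illustrative assumptions. In cgroup mode the "pid" argument is an open file descriptor on the cgroup directory, and an explicit CPU must be given, since cgroup events are per-cpu only.

/* Hedged example: cgroup-scoped counting with PERF_FLAG_PID_CGROUP.
 * Paths and event selection are assumptions for illustration only. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

#ifndef PERF_FLAG_PID_CGROUP
#define PERF_FLAG_PID_CGROUP	(1U << 2)	/* for pre-2.6.39 headers */
#endif

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int cgrp_fd, ev_fd;

	/* In cgroup mode, "pid" is an fd on the cgroup directory. */
	cgrp_fd = open("/sys/fs/cgroup/perf_event/mygroup", O_RDONLY);
	if (cgrp_fd < 0) {
		perror("open cgroup");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* Cgroup events are per-cpu only: cpu must be >= 0. */
	ev_fd = perf_event_open(&attr, cgrp_fd, 0 /* cpu */, -1,
				PERF_FLAG_PID_CGROUP);
	if (ev_fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... read() or mmap() the counter as usual ... */
	close(ev_fd);
	close(cgrp_fd);
	return 0;
}

The config1/config2 union members added in the first hunk serve a separate purpose: they extend attr.config for PMUs whose events need extra configuration bits (carried on the kernel side in the new hw_perf_event extra_reg/extra_config fields).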