author		Peter Zijlstra <peterz@infradead.org>		2016-03-11 23:39:39 +0100
committer	Ingo Molnar <mingo@kernel.org>			2016-03-21 09:08:22 +0100
commit		27348f382b6786fd201779246ee70fa115a5b890 (patch)
tree		1da85f95f36fb75ada58df1f63781f04d65703e7 /arch/x86/events
parent		e7ee3e8cb550ce43752ae1d1b190d6b5c4150a43 (diff)
perf/x86/cqm: Factor out some common code
Having the same code twice (and once quite ugly) is fragile.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/events')
-rw-r--r--	arch/x86/events/intel/cqm.c	28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 380d62da8108..7b5fd811ef45 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -463,6 +463,14 @@ static bool is_mbm_event(int e)
return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
}
+static void cqm_mask_call(struct rmid_read *rr)
+{
+ if (is_mbm_event(rr->evt_type))
+ on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, rr, 1);
+ else
+ on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
+}
+
/*
* Exchange the RMID of a group of events.
*/
@@ -479,18 +487,12 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
*/
if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
struct rmid_read rr = {
- .value = ATOMIC64_INIT(0),
.rmid = old_rmid,
+ .evt_type = group->attr.config,
+ .value = ATOMIC64_INIT(0),
};
- if (is_mbm_event(group->attr.config)) {
- rr.evt_type = group->attr.config;
- on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count,
- &rr, 1);
- } else {
- on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count,
- &rr, 1);
- }
+ cqm_mask_call(&rr);
local64_set(&group->count, atomic64_read(&rr.value));
}
@@ -1180,6 +1182,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
{
unsigned long flags;
struct rmid_read rr = {
+ .evt_type = event->attr.config,
.value = ATOMIC64_INIT(0),
};
@@ -1229,12 +1232,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
if (!__rmid_valid(rr.rmid))
goto out;
- if (is_mbm_event(event->attr.config)) {
- rr.evt_type = event->attr.config;
- on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_count, &rr, 1);
- } else {
- on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, &rr, 1);
- }
+ cqm_mask_call(&rr);
raw_spin_lock_irqsave(&cache_lock, flags);
if (event->hw.cqm_rmid == rr.rmid)
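
For readers outside the kernel tree, the shape of this refactoring can be shown with a small standalone sketch: a single dispatch helper picks the counting callback based on the event type, so both call sites collapse to one call. The types and counters below are simplified stand-ins, not the kernel's struct rmid_read or the on_each_cpu_mask() IPI machinery.

/*
 * Standalone illustration of the pattern in this patch: factor the
 * "pick a counting callback and run it" logic into one helper so the
 * two call sites cannot drift apart.  All names here are stubs.
 */
#include <stdio.h>

enum evt_type { EVT_LLC_OCCUPANCY, EVT_MBM_TOTAL, EVT_MBM_LOCAL };

struct rmid_read_stub {
	enum evt_type evt_type;
	unsigned long long value;
};

static int is_mbm_event(enum evt_type e)
{
	return e == EVT_MBM_TOTAL || e == EVT_MBM_LOCAL;
}

/*
 * Stand-ins for __intel_cqm_event_count() / __intel_mbm_event_count(),
 * which in the kernel run on each CPU in cqm_cpumask via an IPI.
 */
static void cqm_event_count_stub(struct rmid_read_stub *rr)
{
	rr->value += 100;	/* pretend to read an occupancy counter */
}

static void mbm_event_count_stub(struct rmid_read_stub *rr)
{
	rr->value += 200;	/* pretend to read a bandwidth counter */
}

/* The factored-out dispatch helper, mirroring cqm_mask_call(). */
static void cqm_mask_call_stub(struct rmid_read_stub *rr)
{
	if (is_mbm_event(rr->evt_type))
		mbm_event_count_stub(rr);
	else
		cqm_event_count_stub(rr);
}

int main(void)
{
	/*
	 * Both former call sites now reduce to the same two steps:
	 * fill in the read descriptor, then call the helper.
	 */
	struct rmid_read_stub occ = { .evt_type = EVT_LLC_OCCUPANCY, .value = 0 };
	struct rmid_read_stub mbm = { .evt_type = EVT_MBM_LOCAL, .value = 0 };

	cqm_mask_call_stub(&occ);
	cqm_mask_call_stub(&mbm);

	printf("occupancy read: %llu\n", occ.value);
	printf("mbm read:       %llu\n", mbm.value);
	return 0;
}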