Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                          |   1
-rw-r--r--  arch/powerpc/include/asm/paca.h               |   2
-rw-r--r--  arch/powerpc/include/asm/system.h             |   4
-rw-r--r--  arch/powerpc/kernel/module.c                  |   6
-rw-r--r--  arch/powerpc/kernel/perf_callchain.c          |  86
-rw-r--r--  arch/powerpc/kernel/perf_event.c              | 166
-rw-r--r--  arch/powerpc/kernel/perf_event_fsl_emb.c      | 148
-rw-r--r--  arch/powerpc/kernel/time.c                    |  42
-rw-r--r--  arch/powerpc/platforms/512x/clock.c           |   2
-rw-r--r--  arch/powerpc/platforms/52xx/efika.c           |   9
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_common.c  |   8
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c        |   6
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c         |   2
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c                 |   4
-rw-r--r--  arch/powerpc/sysdev/mpic_pasemi_msi.c         |  22
-rw-r--r--  arch/powerpc/sysdev/mpic_u3msi.c              |  18
16 files changed, 279 insertions, 247 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 631e5a0fb6ab..4b1e521d966f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -138,6 +138,7 @@ config PPC
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
+ select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 1ff6662f7faf..9b287fdd8ea3 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -129,7 +129,7 @@ struct paca_struct {
u8 soft_enabled; /* irq soft-enable flag */
u8 hard_enabled; /* set if irqs are enabled in MSR */
u8 io_sync; /* writel() needs spin_unlock sync */
- u8 perf_event_pending; /* PM interrupt while soft-disabled */
+ u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disabled */
/* Stuff for accurate time accounting */
u64 user_time; /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 6c294acac848..9c3d160670b4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -542,10 +542,6 @@ extern void reloc_got2(unsigned long);
#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-extern void account_system_vtime(struct task_struct *);
-#endif
-
extern struct dentry *powerpc_debugfs_root;
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 477c663e0140..49cee9df225b 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -63,11 +63,6 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
- int err;
-
- err = module_bug_finalize(hdr, sechdrs, me);
- if (err)
- return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
@@ -101,5 +96,4 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
- module_bug_cleanup(mod);
}
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 95ad9dad298e..d05ae4204bbf 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -23,18 +23,6 @@
#include "ppc32.h"
#endif
-/*
- * Store another value in a callchain_entry.
- */
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
- unsigned int nr = entry->nr;
-
- if (nr < PERF_MAX_STACK_DEPTH) {
- entry->ip[nr] = ip;
- entry->nr = nr + 1;
- }
-}
/*
* Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0;
}
-static void perf_callchain_kernel(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
lr = regs->link;
sp = regs->gpr[1];
- callchain_store(entry, PERF_CONTEXT_KERNEL);
- callchain_store(entry, regs->nip);
+ perf_callchain_store(entry, regs->nip);
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return;
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
next_ip = regs->nip;
lr = regs->link;
level = 0;
- callchain_store(entry, PERF_CONTEXT_KERNEL);
+ perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
} else {
if (level == 0)
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
++level;
}
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
if (!valid_next_sp(next_sp, sp))
return;
sp = next_sp;
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp)
puc == (unsigned long) &sf->uc;
}
-static void perf_callchain_user_64(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
next_ip = regs->nip;
lr = regs->link;
sp = regs->gpr[1];
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
for (;;) {
fp = (unsigned long __user *) sp;
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
read_user_stack_64(&uregs[PT_R1], &sp))
return;
level = 0;
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
return __get_user_inatomic(*ret, ptr);
}
-static inline void perf_callchain_user_64(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
}
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
return mctx->mc_gregs;
}
-static void perf_callchain_user_32(struct pt_regs *regs,
- struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
{
unsigned int sp, next_sp;
unsigned int next_ip;
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
next_ip = regs->nip;
lr = regs->link;
sp = regs->gpr[1];
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs,
read_user_stack_32(&uregs[PT_R1], &sp))
return;
level = 0;
- callchain_store(entry, PERF_CONTEXT_USER);
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
- callchain_store(entry, next_ip);
+ perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
- struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
- entry->nr = 0;
-
- if (!user_mode(regs)) {
- perf_callchain_kernel(regs, entry);
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- if (current_is_64bit())
- perf_callchain_user_64(regs, entry);
- else
- perf_callchain_user_32(regs, entry);
- }
-
- return entry;
+ if (current_is_64bit())
+ perf_callchain_user_64(entry, regs);
+ else
+ perf_callchain_user_32(entry, regs);
}
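
Note on the callchain changes above: the arch-local callchain_store() helper is dropped in favour of the generic perf_callchain_store(), and the arch hooks no longer allocate a per-CPU entry buffer or record the initial PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER marker, which the core is now expected to do before calling perf_callchain_kernel()/perf_callchain_user(). A minimal sketch of what the generic helper presumably looks like, modelled directly on the removed callchain_store():

static inline void perf_callchain_store(struct perf_callchain_entry *entry,
					u64 ip)
{
	/* append one instruction pointer, bounded by the depth limit */
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}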
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index d301a30445e0..3129c855933c 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -402,6 +402,9 @@ static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
if (!event->hw.idx)
return;
/*
@@ -517,7 +520,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
-void hw_perf_disable(void)
+static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -565,7 +568,7 @@ void hw_perf_disable(void)
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
-void hw_perf_enable(void)
+static void power_pmu_enable(struct pmu *pmu)
{
struct perf_event *event;
struct cpu_hw_events *cpuhw;
@@ -672,6 +675,8 @@ void hw_perf_enable(void)
}
local64_set(&event->hw.prev_count, val);
event->hw.idx = idx;
+ if (event->hw.state & PERF_HES_STOPPED)
+ val = 0;
write_pmc(idx, val);
perf_event_update_userpage(event);
}
@@ -727,7 +732,7 @@ static int collect_events(struct perf_event *group, int max_count,
* re-enable the PMU in order to get hw_perf_enable to do the
* actual work of reconfiguring the PMU.
*/
-static int power_pmu_enable(struct perf_event *event)
+static int power_pmu_add(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -735,7 +740,7 @@ static int power_pmu_enable(struct perf_event *event)
int ret = -EAGAIN;
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
/*
* Add the event to the list (if there is room)
@@ -749,6 +754,9 @@ static int power_pmu_enable(struct perf_event *event)
cpuhw->events[n0] = event->hw.config;
cpuhw->flags[n0] = event->hw.event_base;
+ if (!(ef_flags & PERF_EF_START))
+ event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
/*
* If group events scheduling transaction was started,
* skip the schedulability test here, it will be peformed
@@ -769,7 +777,7 @@ nocheck:
ret = 0;
out:
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
@@ -777,14 +785,14 @@ nocheck:
/*
* Remove a event from the PMU.
*/
-static void power_pmu_disable(struct perf_event *event)
+static void power_pmu_del(struct perf_event *event, int ef_flags)
{
struct cpu_hw_events *cpuhw;
long i;
unsigned long flags;
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
power_pmu_read(event);
@@ -821,34 +829,60 @@ static void power_pmu_disable(struct perf_event *event)
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
+ * POWER-PMU does not support disabling individual counters, hence
+ * program their cycle counter to their max value and ignore the interrupts.
*/
-static void power_pmu_unthrottle(struct perf_event *event)
+
+static void power_pmu_start(struct perf_event *event, int ef_flags)
+{
+ unsigned long flags;
+ s64 left;
+
+ if (!event->hw.idx || !event->hw.sample_period)
+ return;
+
+ if (!(event->hw.state & PERF_HES_STOPPED))
+ return;
+
+ if (ef_flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ local_irq_save(flags);
+ perf_pmu_disable(event->pmu);
+
+ event->hw.state = 0;
+ left = local64_read(&event->hw.period_left);
+ write_pmc(event->hw.idx, left);
+
+ perf_event_update_userpage(event);
+ perf_pmu_enable(event->pmu);
+ local_irq_restore(flags);
+}
+
+static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
- s64 val, left;
unsigned long flags;
if (!event->hw.idx || !event->hw.sample_period)
return;
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
local_irq_save(flags);
- perf_disable();
+ perf_pmu_disable(event->pmu);
+
power_pmu_read(event);
- left = event->hw.sample_period;
- event->hw.last_period = left;
- val = 0;
- if (left < 0x80000000L)
- val = 0x80000000L - left;
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ write_pmc(event->hw.idx, 0);
+
perf_event_update_userpage(event);
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -857,10 +891,11 @@ static void power_pmu_unthrottle(struct perf_event *event)
* Set the flag to make pmu::enable() not perform the
* schedulability test, it will be performed at commit time
*/
-void power_pmu_start_txn(const struct pmu *pmu)
+void power_pmu_start_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
cpuhw->n_txn_start = cpuhw->n_events;
}
@@ -870,11 +905,12 @@ void power_pmu_start_txn(const struct pmu *pmu)
* Clear the flag and pmu::enable() will perform the
* schedulability test.
*/
-void power_pmu_cancel_txn(const struct pmu *pmu)
+void power_pmu_cancel_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
+ perf_pmu_enable(pmu);
}
/*
@@ -882,7 +918,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
* Perform the group schedulability test as a whole
* Return 0 if success
*/
-int power_pmu_commit_txn(const struct pmu *pmu)
+int power_pmu_commit_txn(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
long i, n;
@@ -901,19 +937,10 @@ int power_pmu_commit_txn(const struct pmu *pmu)
cpuhw->event[i]->hw.config = cpuhw->events[i];
cpuhw->group_flag &= ~PERF_EVENT_TXN;
+ perf_pmu_enable(pmu);
return 0;
}
-struct pmu power_pmu = {
- .enable = power_pmu_enable,
- .disable = power_pmu_disable,
- .read = power_pmu_read,
- .unthrottle = power_pmu_unthrottle,
- .start_txn = power_pmu_start_txn,
- .cancel_txn = power_pmu_cancel_txn,
- .commit_txn = power_pmu_commit_txn,
-};
-
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
@@ -1014,7 +1041,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
unsigned long flags;
@@ -1026,25 +1053,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
struct cpu_hw_events *cpuhw;
if (!ppmu)
- return ERR_PTR(-ENXIO);
+ return -ENOENT;
+
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
- return ERR_PTR(err);
+ return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
- return ERR_PTR(-EINVAL);
+ return -ENOENT;
}
+
event->hw.config_base = ev;
event->hw.idx = 0;
@@ -1063,7 +1092,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
* XXX we should check if the task is an idle task.
*/
flags = 0;
- if (event->ctx->task)
+ if (event->attach_state & PERF_ATTACH_TASK)
flags |= PPMU_ONLY_COUNT_RUN;
/*
@@ -1081,7 +1110,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
*/
ev = normal_pmc_alternative(ev, flags);
if (!ev)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
}
@@ -1095,19 +1124,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
n = collect_events(event->group_leader, ppmu->n_counter - 1,
ctrs, events, cflags);
if (n < 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
events[n] = ev;
ctrs[n] = event;
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
put_cpu_var(cpu_hw_events);
if (err)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
event->hw.config = events[n];
event->hw.event_base = cflags[n];
@@ -1132,11 +1161,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
}
event->destroy = hw_perf_event_destroy;
- if (err)
- return ERR_PTR(err);
- return &power_pmu;
+ return err;
}
+struct pmu power_pmu = {
+ .pmu_enable = power_pmu_enable,
+ .pmu_disable = power_pmu_disable,
+ .event_init = power_pmu_event_init,
+ .add = power_pmu_add,
+ .del = power_pmu_del,
+ .start = power_pmu_start,
+ .stop = power_pmu_stop,
+ .read = power_pmu_read,
+ .start_txn = power_pmu_start_txn,
+ .cancel_txn = power_pmu_cancel_txn,
+ .commit_txn = power_pmu_commit_txn,
+};
+
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -1149,6 +1190,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
s64 prev, delta, left;
int record = 0;
+ if (event->hw.state & PERF_HES_STOPPED) {
+ write_pmc(event->hw.idx, 0);
+ return;
+ }
+
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
@@ -1171,6 +1217,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
val = 0x80000000LL - left;
}
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
+
/*
* Finally record data if requested.
*/
@@ -1183,23 +1234,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (perf_event_overflow(event, nmi, &data, regs)) {
- /*
- * Interrupts are coming too fast - throttle them
- * by setting the event to 0, so it will be
- * at least 2^30 cycles until the next interrupt
- * (assuming each event counts at most 2 counts
- * per cycle).
- */
- val = 0;
- left = ~0ULL >> 1;
- }
+ if (perf_event_overflow(event, nmi, &data, regs))
+ power_pmu_stop(event, 0);
}
-
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
- perf_event_update_userpage(event);
}
/*
@@ -1342,6 +1379,7 @@ int register_power_pmu(struct power_pmu *pmu)
freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */
+ perf_pmu_register(&power_pmu);
perf_cpu_notifier(power_pmu_notifier);
return 0;
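
The new struct pmu above captures the reworked perf callback contract: pmu_enable()/pmu_disable() bracket whole-PMU reprogramming (replacing hw_perf_enable()/hw_perf_disable()), event_init() replaces hw_perf_event_init() and returns -ENOENT for event types the PMU does not own, and add()/del()/start()/stop() replace enable()/disable()/unthrottle(), tracking per-event state with PERF_HES_STOPPED/PERF_HES_UPTODATE. A stripped-down skeleton following the same shape; example_pmu and its callbacks are illustrative names, not part of this patch:

#include <linux/errno.h>
#include <linux/perf_event.h>

static void example_pmu_enable(struct pmu *pmu)  { /* re-arm counters */ }
static void example_pmu_disable(struct pmu *pmu) { /* freeze counters */ }

static int example_pmu_event_init(struct perf_event *event)
{
	/* decline events that belong to another PMU */
	if (event->attr.type != PERF_TYPE_RAW)
		return -ENOENT;
	return 0;
}

static int example_pmu_add(struct perf_event *event, int flags)
{
	/* newly added events start stopped unless PERF_EF_START is set */
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		event->hw.state = 0;
	return 0;
}

static void example_pmu_del(struct perf_event *event, int flags)   { }
static void example_pmu_start(struct perf_event *event, int flags) { event->hw.state = 0; }
static void example_pmu_stop(struct perf_event *event, int flags)  { event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; }
static void example_pmu_read(struct perf_event *event)             { }

static struct pmu example_pmu = {
	.pmu_enable	= example_pmu_enable,
	.pmu_disable	= example_pmu_disable,
	.event_init	= example_pmu_event_init,
	.add		= example_pmu_add,
	.del		= example_pmu_del,
	.start		= example_pmu_start,
	.stop		= example_pmu_stop,
	.read		= example_pmu_read,
};

Such a PMU would be registered the same way register_power_pmu() now does it, i.e. perf_pmu_register(&example_pmu);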
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 1ba45471ae43..7ecca59ddf77 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event)
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
-void hw_perf_disable(void)
+static void fsl_emb_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -216,7 +219,7 @@ void hw_perf_disable(void)
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
-void hw_perf_enable(void)
+static void fsl_emb_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-/* perf must be disabled, context locked on entry */
-static int fsl_emb_pmu_enable(struct perf_event *event)
+/* context locked on entry */
+static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
u64 val;
int i;
+ perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
val = 0x80000000L - left;
}
local64_set(&event->hw.prev_count, val);
+
+ if (!(flags & PERF_EF_START)) {
+ event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ val = 0;
+ }
+
write_pmc(i, val);
perf_event_update_userpage(event);
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
ret = 0;
out:
put_cpu_var(cpu_hw_events);
+ perf_pmu_enable(event->pmu);
return ret;
}
-/* perf must be disabled, context locked on entry */
-static void fsl_emb_pmu_disable(struct perf_event *event)
+/* context locked on entry */
+static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
+ perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
cpuhw->n_events--;
out:
+ perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
-/*
- * Re-enable interrupts on a event after they were throttled
- * because they were coming too fast.
- *
- * Context is locked on entry, but perf is not disabled.
- */
-static void fsl_emb_pmu_unthrottle(struct perf_event *event)
+static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
- s64 val, left;
unsigned long flags;
+ s64 left;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
+
+ if (!(event->hw.state & PERF_HES_STOPPED))
+ return;
+
+ if (ef_flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
local_irq_save(flags);
- perf_disable();
- fsl_emb_pmu_read(event);
- left = event->hw.sample_period;
- event->hw.last_period = left;
- val = 0;
- if (left < 0x80000000L)
- val = 0x80000000L - left;
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
+ perf_pmu_disable(event->pmu);
+
+ event->hw.state = 0;
+ left = local64_read(&event->hw.period_left);
+ write_pmc(event->hw.idx, left);
+
perf_event_update_userpage(event);
- perf_enable();
+ perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
-static struct pmu fsl_emb_pmu = {
- .enable = fsl_emb_pmu_enable,
- .disable = fsl_emb_pmu_disable,
- .read = fsl_emb_pmu_read,
- .unthrottle = fsl_emb_pmu_unthrottle,
-};
+static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
+{
+ unsigned long flags;
+
+ if (event->hw.idx < 0 || !event->hw.sample_period)
+ return;
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ local_irq_save(flags);
+ perf_pmu_disable(event->pmu);
+
+ fsl_emb_pmu_read(event);
+ event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ write_pmc(event->hw.idx, 0);
+
+ perf_event_update_userpage(event);
+ perf_pmu_enable(event->pmu);
+ local_irq_restore(flags);
+}
/*
* Release the PMU if this is the last perf_event.
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp)
return 0;
}
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
- return ERR_PTR(err);
+ return err;
break;
case PERF_TYPE_RAW:
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
break;
default:
- return ERR_PTR(-EINVAL);
+ return -ENOENT;
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/*
* If this is in a group, check if it can go on with all the
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
}
if (num_restricted >= ppmu->n_restricted)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
event->hw.idx = -1;
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
- return ERR_PTR(-ENOTSUPP);
+ return -ENOTSUPP;
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
}
event->destroy = hw_perf_event_destroy;
- if (err)
- return ERR_PTR(err);
- return &fsl_emb_pmu;
+ return err;
}
+static struct pmu fsl_emb_pmu = {
+ .pmu_enable = fsl_emb_pmu_enable,
+ .pmu_disable = fsl_emb_pmu_disable,
+ .event_init = fsl_emb_pmu_event_init,
+ .add = fsl_emb_pmu_add,
+ .del = fsl_emb_pmu_del,
+ .start = fsl_emb_pmu_start,
+ .stop = fsl_emb_pmu_stop,
+ .read = fsl_emb_pmu_read,
+};
+
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
s64 prev, delta, left;
int record = 0;
+ if (event->hw.state & PERF_HES_STOPPED) {
+ write_pmc(event->hw.idx, 0);
+ return;
+ }
+
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
@@ -562,6 +601,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
val = 0x80000000LL - left;
}
+ write_pmc(event->hw.idx, val);
+ local64_set(&event->hw.prev_count, val);
+ local64_set(&event->hw.period_left, left);
+ perf_event_update_userpage(event);
+
/*
* Finally record data if requested.
*/
@@ -571,23 +615,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
- if (perf_event_overflow(event, nmi, &data, regs)) {
- /*
- * Interrupts are coming too fast - throttle them
- * by setting the event to 0, so it will be
- * at least 2^30 cycles until the next interrupt
- * (assuming each event counts at most 2 counts
- * per cycle).
- */
- val = 0;
- left = ~0ULL >> 1;
- }
+ if (perf_event_overflow(event, nmi, &data, regs))
+ fsl_emb_pmu_stop(event, 0);
}
-
- write_pmc(event->hw.idx, val);
- local64_set(&event->hw.prev_count, val);
- local64_set(&event->hw.period_left, left);
- perf_event_update_userpage(event);
}
static void perf_event_interrupt(struct pt_regs *regs)
@@ -651,5 +681,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
+ perf_pmu_register(&fsl_emb_pmu);
+
return 0;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 8533b3b83f5d..54888eb10c3b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
#include <asm/trace.h>
#include <asm/io.h>
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
+static inline unsigned long test_irq_work_pending(void)
{
unsigned long x;
asm volatile("lbz %0,%1(13)"
: "=r" (x)
- : "i" (offsetof(struct paca_struct, perf_event_pending)));
+ : "i" (offsetof(struct paca_struct, irq_work_pending)));
return x;
}
-static inline void set_perf_event_pending_flag(void)
+static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (1),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
+ "i" (offsetof(struct paca_struct, irq_work_pending)));
}
-static inline void clear_perf_event_pending(void)
+static inline void clear_irq_work_pending(void)
{
asm volatile("stb %0,%1(13)" : :
"r" (0),
- "i" (offsetof(struct paca_struct, perf_event_pending)));
+ "i" (offsetof(struct paca_struct, irq_work_pending)));
}
#else /* 32-bit */
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
-#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
#endif /* 32 vs 64 bit */
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
{
preempt_disable();
- set_perf_event_pending_flag();
+ set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}
-#else /* CONFIG_PERF_EVENTS */
+#else /* CONFIG_IRQ_WORK */
-#define test_perf_event_pending() 0
-#define clear_perf_event_pending()
+#define test_irq_work_pending() 0
+#define clear_irq_work_pending()
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
/*
* For iSeries shared processors, we have to let the hypervisor
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs)
calculate_steal_time();
- if (test_perf_event_pending()) {
- clear_perf_event_pending();
- perf_event_do_pending();
+ if (test_irq_work_pending()) {
+ clear_irq_work_pending();
+ irq_work_run();
}
#ifdef CONFIG_PPC_ISERIES
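
The time.c plumbing above is the arch half of the new generic irq_work facility: set_irq_work_pending() latches a pending flag (a PACA byte on 64-bit, a per-cpu byte on 32-bit) and forces an immediate decrementer interrupt via set_dec(1), and timer_interrupt() drains the queued work with irq_work_run(). On the consumer side, code that needs to defer work out of an NMI-like context (such as a PMU interrupt) queues it through the generic API; a hedged sketch, with my_work/my_work_func being illustrative names and the initialisation convention assumed from the generic irq_work interface rather than shown in this diff:

#include <linux/irq_work.h>

static void my_work_func(struct irq_work *work)
{
	/* runs from irq_work_run(), i.e. from the next timer_interrupt()
	 * once interrupts are hard-enabled again */
}

static struct irq_work my_work = { .func = my_work_func };

/* from the PMU interrupt handler (or any NMI-like context): */
/*	irq_work_queue(&my_work);                              */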
diff --git a/arch/powerpc/platforms/512x/clock.c b/arch/powerpc/platforms/512x/clock.c
index 5b243bd3eb3b..3dc2a8d262b8 100644
--- a/arch/powerpc/platforms/512x/clock.c
+++ b/arch/powerpc/platforms/512x/clock.c
@@ -57,7 +57,7 @@ static struct clk *mpc5121_clk_get(struct device *dev, const char *id)
int id_match = 0;
if (dev == NULL || id == NULL)
- return NULL;
+ return clk;
mutex_lock(&clocks_mutex);
list_for_each_entry(p, &clocks, node) {
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 45c0cb9b67e6..18c104820198 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -99,7 +99,7 @@ static void __init efika_pcisetup(void)
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't get bus-range for %s\n", pcictrl->full_name);
- return;
+ goto out_put;
}
if (bus_range[1] == bus_range[0])
@@ -111,12 +111,12 @@ static void __init efika_pcisetup(void)
printk(" controlled by %s\n", pcictrl->full_name);
printk("\n");
- hose = pcibios_alloc_controller(of_node_get(pcictrl));
+ hose = pcibios_alloc_controller(pcictrl);
if (!hose) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't allocate PCI controller structure for %s\n",
pcictrl->full_name);
- return;
+ goto out_put;
}
hose->first_busno = bus_range[0];
@@ -124,6 +124,9 @@ static void __init efika_pcisetup(void)
hose->ops = &rtas_pci_ops;
pci_process_bridge_OF_ranges(hose, pcictrl, 0);
+ return;
+out_put:
+ of_node_put(pcictrl);
}
#else
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 6e905314ad5d..41f3a7eda1de 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -325,12 +325,16 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
clrbits32(&simple_gpio->simple_dvo, sync | out);
clrbits8(&wkup_gpio->wkup_dvo, reset);
- /* wait at lease 1 us */
- udelay(2);
+ /* wait for 1 us */
+ udelay(1);
/* Deassert reset */
setbits8(&wkup_gpio->wkup_dvo, reset);
+ /* wait at least 200ns */
+ /* 7 ~= (200ns * timebase) / ns2sec */
+ __delay(7);
+
/* Restore pin-muxing */
out_be32(&simple_gpio->port_config, mux);
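
For the AC97 reset timing change above: the post-reset settle time is now expressed in timebase ticks via __delay() instead of udelay(), and the "7 ~= (200ns * timebase) / ns2sec" comment works out if one assumes an MPC5200-class timebase of roughly 33 MHz (XLB clock / 4; the exact value is board dependent): 200 ns * 33 ticks/us / 1000 ns/us = 6.6, rounded up to 7. Expressed with the kernel's own timebase variable, purely to illustrate where the constant comes from (the patch itself hard-codes 7):

#include <linux/kernel.h>
#include <asm/time.h>

static unsigned long ac97_reset_settle_ticks(void)
{
	/* 200 ns worth of timebase ticks, rounded up */
	return DIV_ROUND_UP(200 * tb_ticks_per_usec, 1000);
}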
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 97085530aa63..e3e379c6caa7 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -310,9 +310,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
}
static struct irq_chip msic_irq_chip = {
- .mask = mask_msi_irq,
- .unmask = unmask_msi_irq,
- .shutdown = unmask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+ .irq_shutdown = mask_msi_irq,
.name = "AXON-MSI",
};
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 93834b0d8272..67e2c4bdac8f 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -243,7 +243,7 @@ static unsigned int xics_startup(unsigned int virq)
* at that level, so we do it here by hand.
*/
if (irq_to_desc(virq)->msi_desc)
- unmask_msi_irq(virq);
+ unmask_msi_irq(irq_get_irq_data(virq));
/* unmask it */
xics_unmask_irq(virq);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 87991d3abbab..bdbd896c89d8 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -51,8 +51,8 @@ static void fsl_msi_end_irq(unsigned int virq)
}
static struct irq_chip fsl_msi_chip = {
- .mask = mask_msi_irq,
- .unmask = unmask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
.ack = fsl_msi_end_irq,
.name = "FSL-MSI",
};
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 3b6a9a43718f..320ad5a9a25d 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -39,24 +39,24 @@
static struct mpic *msi_mpic;
-static void mpic_pasemi_msi_mask_irq(unsigned int irq)
+static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
{
- pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq);
- mask_msi_irq(irq);
- mpic_mask_irq(irq);
+ pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
+ mask_msi_irq(data);
+ mpic_mask_irq(data->irq);
}
-static void mpic_pasemi_msi_unmask_irq(unsigned int irq)
+static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
{
- pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq);
- mpic_unmask_irq(irq);
- unmask_msi_irq(irq);
+ pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
+ mpic_unmask_irq(data->irq);
+ unmask_msi_irq(data);
}
static struct irq_chip mpic_pasemi_msi_chip = {
- .shutdown = mpic_pasemi_msi_mask_irq,
- .mask = mpic_pasemi_msi_mask_irq,
- .unmask = mpic_pasemi_msi_unmask_irq,
+ .irq_shutdown = mpic_pasemi_msi_mask_irq,
+ .irq_mask = mpic_pasemi_msi_mask_irq,
+ .irq_unmask = mpic_pasemi_msi_unmask_irq,
.eoi = mpic_end_irq,
.set_type = mpic_set_irq_type,
.set_affinity = mpic_set_affinity,
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index bcbfe79c704b..a2b028b4a202 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -23,22 +23,22 @@
/* A bit ugly, can we get this from the pci_dev somehow? */
static struct mpic *msi_mpic;
-static void mpic_u3msi_mask_irq(unsigned int irq)
+static void mpic_u3msi_mask_irq(struct irq_data *data)
{
- mask_msi_irq(irq);
- mpic_mask_irq(irq);
+ mask_msi_irq(data);
+ mpic_mask_irq(data->irq);
}
-static void mpic_u3msi_unmask_irq(unsigned int irq)
+static void mpic_u3msi_unmask_irq(struct irq_data *data)
{
- mpic_unmask_irq(irq);
- unmask_msi_irq(irq);
+ mpic_unmask_irq(data->irq);
+ unmask_msi_irq(data);
}
static struct irq_chip mpic_u3msi_chip = {
- .shutdown = mpic_u3msi_mask_irq,
- .mask = mpic_u3msi_mask_irq,
- .unmask = mpic_u3msi_unmask_irq,
+ .irq_shutdown = mpic_u3msi_mask_irq,
+ .irq_mask = mpic_u3msi_mask_irq,
+ .irq_unmask = mpic_u3msi_unmask_irq,
.eoi = mpic_end_irq,
.set_type = mpic_set_irq_type,
.set_affinity = mpic_set_affinity,
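
The axon_msi, fsl_msi, mpic_pasemi_msi and mpic_u3msi updates are all the same genirq API migration: irq_chip mask/unmask (and shutdown) callbacks now take a struct irq_data * and move to the irq_-prefixed fields, while not-yet-converted helpers such as mpic_mask_irq()/mpic_unmask_irq() are still handed the raw number via data->irq. A minimal chip written against the new signatures, with example_msi_* as illustrative names only:

#include <linux/irq.h>

static void example_msi_mask(struct irq_data *data)
{
	/* the legacy irq number remains available as data->irq */
	pr_debug("masking irq %d\n", data->irq);
}

static void example_msi_unmask(struct irq_data *data)
{
	pr_debug("unmasking irq %d\n", data->irq);
}

static struct irq_chip example_msi_chip = {
	.name		= "EXAMPLE-MSI",
	.irq_mask	= example_msi_mask,
	.irq_unmask	= example_msi_unmask,
};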