path: root/arch/arm64/kernel/perf_event.c
author	Will Deacon <will.deacon@arm.com>	2015-03-06 11:54:10 +0000
committer	Will Deacon <will.deacon@arm.com>	2015-03-24 15:09:47 +0000
commit	d5efd9cc9cf2e422d064c912c7d5d985f52c1b2c (patch)
tree	ec4a29ce1df9016b48597888d55e925f3bdf4ac5 /arch/arm64/kernel/perf_event.c
parent	71bbf038eaa44a80dd6df0da7c708d4618172fe0 (diff)
arm64: pmu: add support for interrupt-affinity property
Historically, the PMU devicetree bindings have expected SPIs to be listed in order of *logical* CPU number. This is problematic for bootloaders, especially when the boot CPU (logical ID 0) isn't listed first in the devicetree.

This patch adds a new optional property, interrupt-affinity, to the PMU node which allows the interrupt affinity to be described using a list of phandles to CPU nodes, with each entry in the list corresponding to the SPI at the same index in the interrupts property.

Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
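For illustration only (this devicetree fragment is not part of the patch; the node names, phandle labels and SPI numbers are made-up examples), a PMU node using the new binding might look like the sketch below. Each interrupt-affinity entry is a phandle to the CPU that the SPI at the same index in interrupts is wired to, so the SPIs no longer need to be listed in logical-CPU order:

	cpu0: cpu@0 {
		device_type = "cpu";
		compatible = "arm,cortex-a57";
		reg = <0x0>;
	};

	cpu1: cpu@1 {
		device_type = "cpu";
		compatible = "arm,cortex-a57";
		reg = <0x1>;
	};

	pmu {
		compatible = "arm,armv8-pmuv3";
		/* hypothetical wiring: SPI 60 goes to cpu1, SPI 61 to cpu0 */
		interrupts = <0 60 4>, <0 61 4>;
		interrupt-affinity = <&cpu1>, <&cpu0>;
	};

At probe time the driver walks this list, resolves each phandle to a logical CPU number, and uses that CPU when setting the affinity of the corresponding SPI (see the armpmu_device_probe() and armpmu_reserve_hardware() hunks below).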
Diffstat (limited to 'arch/arm64/kernel/perf_event.c')
-rw-r--r--	arch/arm64/kernel/perf_event.c	57
1 file changed, 53 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 68a74151fa6c..195991dadc37 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,8 +25,10 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -405,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
		free_percpu_irq(irq, &cpu_hw_events);
	} else {
		for (i = 0; i < irqs; ++i) {
-			if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+			int cpu = i;
+
+			if (armpmu->irq_affinity)
+				cpu = armpmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq > 0)
@@ -459,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
		on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq <= 0)
				continue;

+			if (armpmu->irq_affinity)
+				cpu = armpmu->irq_affinity[i];
+
			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
-			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					    irq, i);
+					    irq, cpu);
				continue;
			}
@@ -485,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
				return err;
			}

-			cpumask_set_cpu(i, &armpmu->active_irqs);
+			cpumask_set_cpu(cpu, &armpmu->active_irqs);
		}
	}
@@ -1298,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = {
static int armpmu_device_probe(struct platform_device *pdev)
{
+	int i, *irqs;
+
	if (!cpu_pmu)
		return -ENODEV;

+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(dn), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		of_node_put(dn);
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			break;
+		}
+
+		irqs[i] = cpu;
+	}
+
+	if (i == pdev->num_resources)
+		cpu_pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
	cpu_pmu->plat_device = pdev;
	return 0;
}