author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-09 17:33:44 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-09 17:33:44 -0700
commit    c23190c0bf1236e1eb5521a8b10d0102fbc1338c (patch)
tree      eb23225a121fb56a58d69d34e7338bfe81ede644 /arch
parent    fc335c1b68c68f626f07f1819e57d112d666bbba (diff)
parent    45ed695ac10a23cb4e60a3e0b68b3f21a8670670 (diff)
Merge tag 'trace-ipi-tracepoints' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull IPI tracepoints for ARM from Steven Rostedt:
 "Nicolas Pitre added generic tracepoints for tracing IPIs and updated
  the arm and arm64 architectures.  It required some minor updates to
  the generic tracepoint system, so it had to wait for me to implement
  them"

* tag 'trace-ipi-tracepoints' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ARM64: add IPI tracepoints
  ARM: add IPI tracepoints
  tracepoint: add generic tracepoint definitions for IPI tracing
  tracing: Do not do anything special with tracepoint_string when tracing is disabled
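The generic header include/trace/events/ipi.h that both architectures now include is not part of this diff (it is limited to arch/). As a rough, non-authoritative sketch of what those generic definitions look like, inferred from the trace_ipi_raise()/trace_ipi_entry()/trace_ipi_exit() call sites added below (field names and layout here are assumptions, not the actual header):

/*
 * Hypothetical sketch of the generic IPI trace events, inferred from the
 * call sites in arch/arm/kernel/smp.c and arch/arm64/kernel/smp.c:
 *   trace_ipi_raise(mask, reason), trace_ipi_entry(reason), trace_ipi_exit(reason)
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ipi

#if !defined(_TRACE_IPI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IPI_H

#include <linux/tracepoint.h>

TRACE_EVENT(ipi_raise,

	TP_PROTO(const struct cpumask *mask, const char *reason),

	TP_ARGS(mask, reason),

	TP_STRUCT__entry(
		/* record the target CPU set and a human-readable reason */
		__bitmask(target_cpus, nr_cpumask_bits)
		__string(reason, reason)
	),

	TP_fast_assign(
		__assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
		__assign_str(reason, reason);
	),

	TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus),
		  __get_str(reason))
);

DECLARE_EVENT_CLASS(ipi_handler,

	TP_PROTO(const char *reason),

	TP_ARGS(reason),

	TP_STRUCT__entry(
		/* reason points at a __tracepoint_string, so store the pointer */
		__field(const char *, reason)
	),

	TP_fast_assign(
		__entry->reason = reason;
	),

	TP_printk("(%s)", __entry->reason)
);

DEFINE_EVENT(ipi_handler, ipi_entry,
	TP_PROTO(const char *reason),
	TP_ARGS(reason)
);

DEFINE_EVENT(ipi_handler, ipi_exit,
	TP_PROTO(const char *reason),
	TP_ARGS(reason)
);

#endif /* _TRACE_IPI_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

The diff below then funnels every IPI send through a small smp_cross_call() wrapper that fires ipi_raise before invoking the real __smp_cross_call hook, and brackets handle_IPI() with ipi_entry/ipi_exit, so each event carries the per-type reason string from ipi_types[].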
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/kernel/smp.c    | 70
-rw-r--r--  arch/arm64/kernel/smp.c  | 65
2 files changed, 81 insertions(+), 54 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7c4fada440f0..9388a3d479e1 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -47,6 +47,9 @@
#include <asm/mach/arch.h>
#include <asm/mpu.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -430,38 +433,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
- if (!smp_cross_call)
- smp_cross_call = fn;
-}
-
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+ if (!__smp_cross_call)
+ __smp_cross_call = fn;
}
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
- if (is_smp())
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
-}
-#endif
-
-static const char *ipi_types[NR_IPI] = {
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s) [x] = s
S(IPI_WAKEUP, "CPU wakeup interrupts"),
S(IPI_TIMER, "Timer broadcast interrupts"),
@@ -473,6 +453,12 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_COMPLETION, "completion interrupts"),
};
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __smp_cross_call(target, ipinr);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
@@ -499,6 +485,29 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_WAKEUP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ if (is_smp())
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -556,8 +565,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if (ipinr < NR_IPI)
+ if ((unsigned)ipinr < NR_IPI) {
+ trace_ipi_entry(ipi_types[ipinr]);
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ }
switch (ipinr) {
case IPI_WAKEUP:
@@ -612,6 +623,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
cpu, ipinr);
break;
}
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_exit(ipi_types[ipinr]);
set_irq_regs(old_regs);
}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 3e2f5ebbf63e..474339718105 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -51,6 +51,9 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -313,8 +316,6 @@ void __init smp_prepare_boot_cpu(void)
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -469,32 +470,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
+static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
- smp_cross_call = fn;
+ __smp_cross_call = fn;
}
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
- smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
-#ifdef CONFIG_IRQ_WORK
-void arch_irq_work_raise(void)
-{
- if (smp_cross_call)
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
-}
-#endif
-
-static const char *ipi_types[NR_IPI] = {
-#define S(x,s) [x - IPI_RESCHEDULE] = s
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+#define S(x,s) [x] = s
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
@@ -503,12 +487,18 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_IRQ_WORK, "IRQ work interrupts"),
};
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+ trace_ipi_raise(target, ipi_types[ipinr]);
+ __smp_cross_call(target, ipinr);
+}
+
void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu, i;
for (i = 0; i < NR_IPI; i++) {
- seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
prec >= 4 ? " " : "");
for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
@@ -528,6 +518,24 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ if (__smp_cross_call)
+ smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
@@ -559,8 +567,10 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
unsigned int cpu = smp_processor_id();
struct pt_regs *old_regs = set_irq_regs(regs);
- if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI)
- __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]);
+ if ((unsigned)ipinr < NR_IPI) {
+ trace_ipi_entry(ipi_types[ipinr]);
+ __inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ }
switch (ipinr) {
case IPI_RESCHEDULE:
@@ -605,6 +615,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
}
+
+ if ((unsigned)ipinr < NR_IPI)
+ trace_ipi_exit(ipi_types[ipinr]);
set_irq_regs(old_regs);
}